# Licensed under a 3-clause BSD style license - see LICENSE.rst

from numpy import exp

import astropy.units as u
from astropy.cosmology.parameter import Parameter
from astropy.cosmology.utils import aszarr

from . import scalar_inv_efuncs
from .base import FLRW

__all__ = ["w0wzCDM"]

__doctest_requires__ = {'*': ['scipy']}


class w0wzCDM(FLRW):
    """
    FLRW cosmology with a variable dark energy equation of state and
    curvature.

    The equation for the dark energy equation of state uses the simple form:
    :math:`w(z) = w_0 + w_z z`.

    This form is not recommended for z > 1.

    Parameters
    ----------
    H0 : float or scalar quantity-like ['frequency']
        Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].

    Om0 : float
        Omega matter: density of non-relativistic matter in units of the
        critical density at z=0.

    Ode0 : float
        Omega dark energy: density of dark energy in units of the critical
        density at z=0.

    w0 : float, optional
        Dark energy equation of state at z=0. This is pressure/density for
        dark energy in units where c=1.

    wz : float, optional
        Derivative of the dark energy equation of state with respect to z.
        A cosmological constant has w0=-1.0 and wz=0.0.

    Tcmb0 : float or scalar quantity-like ['temperature'], optional
        Temperature of the CMB at z=0. If a float, must be in [K].
        Default: 0 [K]. Setting this to zero will turn off both photons
        and neutrinos (even massive ones).

    Neff : float, optional
        Effective number of Neutrino species. Default 3.04.

    m_nu : quantity-like ['energy', 'mass'] or array-like, optional
        Mass of each neutrino species in [eV] (mass-energy equivalency
        enabled). If this is a scalar Quantity, then all neutrino species are
        assumed to have that mass. Otherwise, the mass of each species. The
        actual number of neutrino species (and hence the number of elements
        of m_nu if it is not scalar) must be the floor of Neff. Typically
        this means you should provide three neutrino masses unless you are
        considering something like a sterile neutrino.

    Ob0 : float or None, optional
        Omega baryons: density of baryonic matter in units of the critical
        density at z=0. If this is set to None (the default), any computation
        that requires its value will raise an exception.

    name : str or None (optional, keyword-only)
        Name for this cosmological object.

    meta : mapping or None (optional, keyword-only)
        Metadata for the cosmology, e.g., a reference.

    Examples
    --------
    >>> from astropy.cosmology import w0wzCDM
    >>> cosmo = w0wzCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wz=0.2)

    The comoving distance in Mpc at redshift z:

    >>> z = 0.5
    >>> dc = cosmo.comoving_distance(z)
    """

    w0 = Parameter(doc="Dark energy equation of state at z=0.", fvalidate="float")
    wz = Parameter(doc="Derivative of the dark energy equation of state w.r.t. z.",
                   fvalidate="float")

    def __init__(self, H0, Om0, Ode0, w0=-1.0, wz=0.0, Tcmb0=0.0*u.K, Neff=3.04,
                 m_nu=0.0*u.eV, Ob0=None, *, name=None, meta=None):
        super().__init__(H0=H0, Om0=Om0, Ode0=Ode0, Tcmb0=Tcmb0, Neff=Neff,
                         m_nu=m_nu, Ob0=Ob0, name=name, meta=meta)
        self.w0 = w0
        self.wz = wz

        # Please see :ref:`astropy-cosmology-fast-integrals` for discussion
        # about what is being done here.
        if self._Tcmb0.value == 0:
            self._inv_efunc_scalar = scalar_inv_efuncs.w0wzcdm_inv_efunc_norel
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
                                           self._w0, self._wz)
        elif not self._massivenu:
            self._inv_efunc_scalar = scalar_inv_efuncs.w0wzcdm_inv_efunc_nomnu
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
                                           self._Ogamma0 + self._Onu0,
                                           self._w0, self._wz)
        else:
            self._inv_efunc_scalar = scalar_inv_efuncs.w0wzcdm_inv_efunc
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
                                           self._Ogamma0, self._neff_per_nu,
                                           self._nmasslessnu, self._nu_y_list,
                                           self._w0, self._wz)

    def w(self, z):
        r"""Returns dark energy equation of state at redshift ``z``.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        w : ndarray or float
            The dark energy equation of state.
            Returns `float` if the input is scalar.

        Notes
        -----
        The dark energy equation of state is defined as
        :math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at
        redshift z and :math:`\rho(z)` is the density at redshift z, both in
        units where c=1. Here this is given by :math:`w(z) = w_0 + w_z z`.
        """
        return self._w0 + self._wz * aszarr(z)

    def de_density_scale(self, z):
        r"""Evaluates the redshift dependence of the dark energy density.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        I : ndarray or float
            The scaling of the energy density of dark energy with redshift.
            Returns `float` if the input is scalar.

        Notes
        -----
        The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
        and in this case is given by

        .. math::

           I = \left(1 + z\right)^{3 \left(1 + w_0 - w_z\right)}
               \exp \left(3 w_z z\right)
        """
        z = aszarr(z)
        zp1 = z + 1.0  # (converts z [unit] -> z [dimensionless])
        # Evaluating the general de_density_scale integral for
        # w(z) = w0 + wz*z gives 3*[(1 + w0 - wz)*ln(1 + z) + wz*z] in the
        # exponent, hence the positive sign on the wz term.
        return zp1 ** (3. * (1. + self._w0 - self._wz)) * exp(3. * self._wz * z)
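
# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the module API): a quick check
# of the w0wzCDM quantities defined above. The parameter values are arbitrary
# demo assumptions; the distance call needs scipy at runtime.
if __name__ == "__main__":
    import numpy as np

    demo = w0wzCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wz=0.2)
    zs = np.array([0.0, 0.5, 1.0])
    print(demo.w(zs))                 # w(z) = w0 + wz*z -> [-0.9, -0.8, -0.7]
    print(demo.de_density_scale(zs))  # (1+z)^{3(1+w0-wz)} * exp(3 wz z)
    print(demo.comoving_distance(0.5))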
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import warnings
from abc import abstractmethod
from math import exp, floor, log, pi, sqrt
from numbers import Number

import numpy as np
from numpy import inf, sin

import astropy.constants as const
import astropy.units as u
from astropy.cosmology.core import Cosmology, FlatCosmologyMixin
from astropy.cosmology.parameter import (Parameter, _validate_non_negative,
                                         _validate_with_unit)
from astropy.cosmology.utils import aszarr, vectorize_redshift_method
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.exceptions import AstropyUserWarning

# isort: split
if HAS_SCIPY:
    from scipy.integrate import quad
else:
    def quad(*args, **kwargs):
        raise ModuleNotFoundError("No module named 'scipy.integrate'")

__all__ = ["FLRW", "FlatFLRWMixin"]

__doctest_requires__ = {'*': ['scipy']}

# Some conversion constants -- useful to compute them once here and reuse in
# the initialization rather than have every object do them.
H0units_to_invs = (u.km / (u.s * u.Mpc)).to(1.0 / u.s)
sec_to_Gyr = u.s.to(u.Gyr)
# const in critical density in cgs units (g cm^-3)
critdens_const = (3 / (8 * pi * const.G)).cgs.value
# angle conversions
radian_in_arcsec = (1 * u.rad).to(u.arcsec)
radian_in_arcmin = (1 * u.rad).to(u.arcmin)
# Radiation parameter over c^2 in cgs (g cm^-3 K^-4)
a_B_c2 = (4 * const.sigma_sb / const.c ** 3).cgs.value
# Boltzmann constant in eV / K
kB_evK = const.k_B.to(u.eV / u.K)


class FLRW(Cosmology):
    """
    A class describing an isotropic and homogeneous
    (Friedmann-Lemaitre-Robertson-Walker) cosmology.

    This is an abstract base class -- you cannot instantiate examples of this
    class, but must work with one of its subclasses, such as
    :class:`~astropy.cosmology.LambdaCDM` or :class:`~astropy.cosmology.wCDM`.

    Parameters
    ----------
    H0 : float or scalar quantity-like ['frequency']
        Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].

    Om0 : float
        Omega matter: density of non-relativistic matter in units of the
        critical density at z=0. Note that this does not include massive
        neutrinos.

    Ode0 : float
        Omega dark energy: density of dark energy in units of the critical
        density at z=0.

    Tcmb0 : float or scalar quantity-like ['temperature'], optional
        Temperature of the CMB at z=0. If a float, must be in [K].
        Default: 0 [K]. Setting this to zero will turn off both photons
        and neutrinos (even massive ones).

    Neff : float, optional
        Effective number of Neutrino species. Default 3.04.

    m_nu : quantity-like ['energy', 'mass'] or array-like, optional
        Mass of each neutrino species in [eV] (mass-energy equivalency
        enabled). If this is a scalar Quantity, then all neutrino species are
        assumed to have that mass. Otherwise, the mass of each species. The
        actual number of neutrino species (and hence the number of elements
        of m_nu if it is not scalar) must be the floor of Neff. Typically
        this means you should provide three neutrino masses unless you are
        considering something like a sterile neutrino.

    Ob0 : float or None, optional
        Omega baryons: density of baryonic matter in units of the critical
        density at z=0. If this is set to None (the default), any computation
        that requires its value will raise an exception.

    name : str or None (optional, keyword-only)
        Name for this cosmological object.

    meta : mapping or None (optional, keyword-only)
        Metadata for the cosmology, e.g., a reference.

    Notes
    -----
    Class instances are immutable -- you cannot change the parameters' values.
    That is, all of the above attributes (except meta) are read only.
    For details on how to create performant custom subclasses, see the
    documentation on :ref:`astropy-cosmology-fast-integrals`.
    """

    H0 = Parameter(doc="Hubble constant as an `~astropy.units.Quantity` at z=0.",
                   unit="km/(s Mpc)", fvalidate="scalar")
    Om0 = Parameter(doc="Omega matter; matter density/critical density at z=0.",
                    fvalidate="non-negative")
    Ode0 = Parameter(doc="Omega dark energy; dark energy density/critical density at z=0.",
                     fvalidate="float")
    Tcmb0 = Parameter(doc="Temperature of the CMB as `~astropy.units.Quantity` at z=0.",
                      unit="Kelvin", fvalidate="scalar")
    Neff = Parameter(doc="Number of effective neutrino species.",
                     fvalidate="non-negative")
    m_nu = Parameter(doc="Mass of neutrino species.",
                     unit="eV", equivalencies=u.mass_energy())
    Ob0 = Parameter(doc="Omega baryon; baryonic matter density/critical density at z=0.")

    def __init__(self, H0, Om0, Ode0, Tcmb0=0.0*u.K, Neff=3.04, m_nu=0.0*u.eV,
                 Ob0=None, *, name=None, meta=None):
        super().__init__(name=name, meta=meta)

        # Assign (and validate) Parameters
        self.H0 = H0
        self.Om0 = Om0
        self.Ode0 = Ode0
        self.Tcmb0 = Tcmb0
        self.Neff = Neff
        self.m_nu = m_nu  # (reset later, this is just for unit validation)
        self.Ob0 = Ob0  # (must be after Om0)

        # Derived quantities:
        # Dark matter density; matter - baryons, if latter is not None.
        self._Odm0 = None if Ob0 is None else (self._Om0 - self._Ob0)

        # 100 km/s/Mpc * h = H0 (so h is dimensionless)
        self._h = self._H0.value / 100.0
        # Hubble distance
        self._hubble_distance = (const.c / self._H0).to(u.Mpc)
        # H0 in s^-1
        H0_s = self._H0.value * H0units_to_invs
        # Hubble time
        self._hubble_time = (sec_to_Gyr / H0_s) << u.Gyr

        # Critical density at z=0 (grams per cubic cm)
        cd0value = critdens_const * H0_s ** 2
        self._critical_density0 = cd0value << u.g / u.cm ** 3

        # Compute photon density from Tcmb
        self._Ogamma0 = a_B_c2 * self._Tcmb0.value ** 4 / self._critical_density0.value

        # Compute Neutrino temperature:
        # The constant in front is (4/11)^1/3 -- see any cosmology book for an
        # explanation -- for example, Weinberg 'Cosmology' p 154 eq (3.1.21).
        self._Tnu0 = 0.7137658555036082 * self._Tcmb0

        # Compute neutrino parameters:
        if self._m_nu is None:
            self._nneutrinos = 0
            self._neff_per_nu = None
            self._massivenu = False
            self._massivenu_mass = None
            self._nmassivenu = self._nmasslessnu = None
        else:
            self._nneutrinos = floor(self._Neff)

            # We are going to share Neff between the neutrinos equally. In
            # detail this is not correct, but it is a standard assumption
            # because properly calculating it is a) complicated b) depends on
            # the details of the massive neutrinos (e.g., their weak
            # interactions, which could be unusual if one is considering
            # sterile neutrinos).
            self._neff_per_nu = self._Neff / self._nneutrinos

            # Now figure out if we have massive neutrinos to deal with, and if
            # so, get the right number of masses. It is worth keeping track of
            # massless ones separately (since they are easy to deal with, and a
            # common use case is to have only one massive neutrino).
            massive = np.nonzero(self._m_nu.value > 0)[0]
            self._massivenu = massive.size > 0
            self._nmassivenu = len(massive)
            self._massivenu_mass = self._m_nu[massive].value if self._massivenu else None
            self._nmasslessnu = self._nneutrinos - self._nmassivenu

        # Compute Neutrino Omega and total relativistic component for massive
        # neutrinos. We also store a list version, since that is more efficient
        # to do integrals with (perhaps surprisingly! But small python lists
        # are more efficient than small NumPy arrays).
        if self._massivenu:  # (`_massivenu` set in `m_nu`)
            nu_y = self._massivenu_mass / (kB_evK * self._Tnu0)
            self._nu_y = nu_y.value
            self._nu_y_list = self._nu_y.tolist()
            self._Onu0 = self._Ogamma0 * self.nu_relative_density(0)
        else:
            # This case is particularly simple, so do it directly. The
            # 0.2271... is 7/8 (4/11)^(4/3) -- the temperature bit ^4
            # (blackbody energy density) times 7/8 for FD vs. BE statistics.
            self._Onu0 = 0.22710731766 * self._Neff * self._Ogamma0
            self._nu_y = self._nu_y_list = None

        # Compute curvature density
        self._Ok0 = 1.0 - self._Om0 - self._Ode0 - self._Ogamma0 - self._Onu0

        # Subclasses should override this reference if they provide
        # more efficient scalar versions of inv_efunc.
        self._inv_efunc_scalar = self.inv_efunc
        self._inv_efunc_scalar_args = ()

    # ---------------------------------------------------------------
    # Parameter details

    @Ob0.validator
    def Ob0(self, param, value):
        """Validate baryon density: None, or a non-negative float no larger
        than the total matter density."""
        if value is None:
            return value

        value = _validate_non_negative(self, param, value)
        if value > self.Om0:
            raise ValueError("baryonic density can not be larger than total matter density.")
        return value

    @m_nu.validator
    def m_nu(self, param, value):
        """Validate neutrino masses to right value, units, and shape.

        There are no neutrinos if floor(Neff) or Tcmb0 are 0. The number of
        neutrinos must match floor(Neff). Neutrino masses cannot be negative.
        """
        # Check if there are any neutrinos
        if (nneutrinos := floor(self._Neff)) == 0 or self._Tcmb0.value == 0:
            return None  # None, regardless of input

        # Validate / set units
        value = _validate_with_unit(self, param, value)

        # Check values and data shapes
        if value.shape not in ((), (nneutrinos,)):
            raise ValueError("unexpected number of neutrino masses — "
                             f"expected {nneutrinos}, got {len(value)}.")
        elif np.any(value.value < 0):
            raise ValueError("invalid (negative) neutrino mass encountered.")

        # scalar -> array
        if value.isscalar:
            value = np.full_like(value, value, shape=nneutrinos)

        return value

    # ---------------------------------------------------------------
    # properties

    @property
    def is_flat(self):
        """Return bool; `True` if the cosmology is flat."""
        return bool((self._Ok0 == 0.0) and (self.Otot0 == 1.0))

    @property
    def Otot0(self):
        """Omega total; the total density/critical density at z=0."""
        return self._Om0 + self._Ogamma0 + self._Onu0 + self._Ode0 + self._Ok0

    @property
    def Odm0(self):
        """Omega dark matter; dark matter density/critical density at z=0."""
        return self._Odm0

    @property
    def Ok0(self):
        """Omega curvature; the effective curvature density/critical density at z=0."""
        return self._Ok0

    @property
    def Tnu0(self):
        """Temperature of the neutrino background as `~astropy.units.Quantity` at z=0."""
        return self._Tnu0

    @property
    def has_massive_nu(self):
        """Does this cosmology have at least one massive neutrino species?"""
        if self._Tnu0.value == 0:
            return False
        return self._massivenu

    @property
    def h(self):
        """Dimensionless Hubble constant: h = H_0 / 100 [km/sec/Mpc]."""
        return self._h

    @property
    def hubble_time(self):
        """Hubble time as `~astropy.units.Quantity`."""
        return self._hubble_time

    @property
    def hubble_distance(self):
        """Hubble distance as `~astropy.units.Quantity`."""
        return self._hubble_distance

    @property
    def critical_density0(self):
        """Critical density as `~astropy.units.Quantity` at z=0."""
        return self._critical_density0

    @property
    def Ogamma0(self):
        """Omega gamma; the density/critical density of photons at z=0."""
        return self._Ogamma0

    @property
    def Onu0(self):
        """Omega nu; the
        density/critical density of neutrinos at z=0."""
        return self._Onu0

    # ---------------------------------------------------------------

    @abstractmethod
    def w(self, z):
        r"""The dark energy equation of state.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        w : ndarray or float
            The dark energy equation of state. `float` if scalar input.

        Notes
        -----
        The dark energy equation of state is defined as
        :math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at
        redshift z and :math:`\rho(z)` is the density at redshift z, both in
        units where c=1.

        This must be overridden by subclasses.
        """
        raise NotImplementedError("w(z) is not implemented")

    def Otot(self, z):
        """The total density parameter at redshift ``z``.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshifts.

        Returns
        -------
        Otot : ndarray or float
            The total density relative to the critical density at each
            redshift. Returns float if input scalar.
        """
        return self.Om(z) + self.Ogamma(z) + self.Onu(z) + self.Ode(z) + self.Ok(z)

    def Om(self, z):
        """
        Return the density parameter for non-relativistic matter
        at redshift ``z``.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        Om : ndarray or float
            The density of non-relativistic matter relative to the critical
            density at each redshift.
            Returns `float` if the input is scalar.

        Notes
        -----
        This does not include neutrinos, even if non-relativistic at the
        redshift of interest; see `Onu`.
        """
        z = aszarr(z)
        return self._Om0 * (z + 1.0) ** 3 * self.inv_efunc(z) ** 2

    def Ob(self, z):
        """Return the density parameter for baryonic matter at redshift ``z``.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        Ob : ndarray or float
            The density of baryonic matter relative to the critical density
            at each redshift.
            Returns `float` if the input is scalar.

        Raises
        ------
        ValueError
            If ``Ob0`` is `None`.
        """
        if self._Ob0 is None:
            raise ValueError("Baryon density not set for this cosmology")
        z = aszarr(z)
        return self._Ob0 * (z + 1.0) ** 3 * self.inv_efunc(z) ** 2

    def Odm(self, z):
        """Return the density parameter for dark matter at redshift ``z``.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        Odm : ndarray or float
            The density of non-relativistic dark matter relative to the
            critical density at each redshift.
            Returns `float` if the input is scalar.

        Raises
        ------
        ValueError
            If ``Ob0`` is `None`.

        Notes
        -----
        This does not include neutrinos, even if non-relativistic at the
        redshift of interest.
        """
        if self._Odm0 is None:
            raise ValueError("Baryonic density not set for this cosmology, "
                             "unclear meaning of dark matter density")
        z = aszarr(z)
        return self._Odm0 * (z + 1.0) ** 3 * self.inv_efunc(z) ** 2

    def Ok(self, z):
        """
        Return the equivalent density parameter for curvature at redshift
        ``z``.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        Ok : ndarray or float
            The equivalent density parameter for curvature at each redshift.
            Returns `float` if the input is scalar.
""" z = aszarr(z) if self._Ok0 == 0: # Common enough to be worth checking explicitly return np.zeros(z.shape) if hasattr(z, "shape") else 0.0 return self._Ok0 * (z + 1.0) ** 2 * self.inv_efunc(z) ** 2 def Ode(self, z): """Return the density parameter for dark energy at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- Ode : ndarray or float The density of non-relativistic matter relative to the critical density at each redshift. Returns `float` if the input is scalar. """ z = aszarr(z) if self._Ode0 == 0: # Common enough to be worth checking explicitly return np.zeros(z.shape) if hasattr(z, "shape") else 0.0 return self._Ode0 * self.de_density_scale(z) * self.inv_efunc(z) ** 2 def Ogamma(self, z): """Return the density parameter for photons at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- Ogamma : ndarray or float The energy density of photons relative to the critical density at each redshift. Returns `float` if the input is scalar. """ z = aszarr(z) return self._Ogamma0 * (z + 1.0) ** 4 * self.inv_efunc(z) ** 2 def Onu(self, z): r"""Return the density parameter for neutrinos at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- Onu : ndarray or float The energy density of neutrinos relative to the critical density at each redshift. Note that this includes their kinetic energy (if they have mass), so it is not equal to the commonly used :math:`\sum \frac{m_{\nu}}{94 eV}`, which does not include kinetic energy. Returns `float` if the input is scalar. """ z = aszarr(z) if self._Onu0 == 0: # Common enough to be worth checking explicitly return np.zeros(z.shape) if hasattr(z, "shape") else 0.0 return self.Ogamma(z) * self.nu_relative_density(z) def Tcmb(self, z): """Return the CMB temperature at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- Tcmb : `~astropy.units.Quantity` ['temperature'] The temperature of the CMB in K. """ return self._Tcmb0 * (aszarr(z) + 1.0) def Tnu(self, z): """Return the neutrino temperature at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- Tnu : `~astropy.units.Quantity` ['temperature'] The temperature of the cosmic neutrino background in K. """ return self._Tnu0 * (aszarr(z) + 1.0) def nu_relative_density(self, z): r"""Neutrino density function relative to the energy density in photons. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- f : ndarray or float The neutrino density scaling factor relative to the density in photons at each redshift. Only returns `float` if z is scalar. Notes ----- The density in neutrinos is given by .. math:: \rho_{\nu} \left(a\right) = 0.2271 \, N_{eff} \, f\left(m_{\nu} a / T_{\nu 0} \right) \, \rho_{\gamma} \left( a \right) where .. math:: f \left(y\right) = \frac{120}{7 \pi^4} \int_0^{\infty} \, dx \frac{x^2 \sqrt{x^2 + y^2}} {e^x + 1} assuming that all neutrino species have the same mass. If they have different masses, a similar term is calculated for each one. Note that ``f`` has the asymptotic behavior :math:`f(0) = 1`. This method returns :math:`0.2271 f` using an analytical fitting formula given in Komatsu et al. 2011, ApJS 192, 18. 
""" # Note that there is also a scalar-z-only cython implementation of # this in scalar_inv_efuncs.pyx, so if you find a problem in this # you need to update there too. # See Komatsu et al. 2011, eq 26 and the surrounding discussion # for an explanation of what we are doing here. # However, this is modified to handle multiple neutrino masses # by computing the above for each mass, then summing prefac = 0.22710731766 # 7/8 (4/11)^4/3 -- see any cosmo book # The massive and massless contribution must be handled separately # But check for common cases first z = aszarr(z) if not self._massivenu: return prefac * self._Neff * (np.ones(z.shape) if hasattr(z, "shape") else 1.0) # These are purely fitting constants -- see the Komatsu paper p = 1.83 invp = 0.54644808743 # 1.0 / p k = 0.3173 curr_nu_y = self._nu_y / (1. + np.expand_dims(z, axis=-1)) rel_mass_per = (1.0 + (k * curr_nu_y) ** p) ** invp rel_mass = rel_mass_per.sum(-1) + self._nmasslessnu return prefac * self._neff_per_nu * rel_mass def _w_integrand(self, ln1pz): """Internal convenience function for w(z) integral (eq. 5 of [1]_). Parameters ---------- ln1pz : `~numbers.Number` or scalar ndarray Assumes scalar input, since this should only be called inside an integral. References ---------- .. [1] Linder, E. (2003). Exploring the Expansion History of the Universe. Phys. Rev. Lett., 90, 091301. """ return 1.0 + self.w(exp(ln1pz) - 1.0) def de_density_scale(self, z): r"""Evaluates the redshift dependence of the dark energy density. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- I : ndarray or float The scaling of the energy density of dark energy with redshift. Returns `float` if the input is scalar. Notes ----- The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`, and is given by .. math:: I = \exp \left( 3 \int_{a}^1 \frac{ da^{\prime} }{ a^{\prime} } \left[ 1 + w\left( a^{\prime} \right) \right] \right) The actual integral used is rewritten from [1]_ to be in terms of z. It will generally helpful for subclasses to overload this method if the integral can be done analytically for the particular dark energy equation of state that they implement. References ---------- .. [1] Linder, E. (2003). Exploring the Expansion History of the Universe. Phys. Rev. Lett., 90, 091301. """ # This allows for an arbitrary w(z) following eq (5) of # Linder 2003, PRL 90, 91301. The code here evaluates # the integral numerically. However, most popular # forms of w(z) are designed to make this integral analytic, # so it is probably a good idea for subclasses to overload this # method if an analytic form is available. z = aszarr(z) if not isinstance(z, (Number, np.generic)): # array/Quantity ival = np.array([quad(self._w_integrand, 0, log(1 + redshift))[0] for redshift in z]) return np.exp(3 * ival) else: # scalar ival = quad(self._w_integrand, 0, log(z + 1.0))[0] return exp(3 * ival) def efunc(self, z): """Function used to calculate H(z), the Hubble parameter. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- E : ndarray or float The redshift scaling of the Hubble constant. Returns `float` if the input is scalar. Defined such that :math:`H(z) = H_0 E(z)`. Notes ----- It is not necessary to override this method, but if de_density_scale takes a particularly simple form, it may be advantageous to. 
""" Or = self._Ogamma0 + (self._Onu0 if not self._massivenu else self._Ogamma0 * self.nu_relative_density(z)) zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless]) return np.sqrt(zp1 ** 2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0) + self._Ode0 * self.de_density_scale(z)) def inv_efunc(self, z): """Inverse of ``efunc``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- E : ndarray or float The redshift scaling of the inverse Hubble constant. Returns `float` if the input is scalar. """ # Avoid the function overhead by repeating code Or = self._Ogamma0 + (self._Onu0 if not self._massivenu else self._Ogamma0 * self.nu_relative_density(z)) zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless]) return (zp1 ** 2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0) + self._Ode0 * self.de_density_scale(z))**(-0.5) def _lookback_time_integrand_scalar(self, z): """Integrand of the lookback time (equation 30 of [1]_). Parameters ---------- z : float Input redshift. Returns ------- I : float The integrand for the lookback time. References ---------- .. [1] Hogg, D. (1999). Distance measures in cosmology, section 11. arXiv e-prints, astro-ph/9905116. """ return self._inv_efunc_scalar(z, *self._inv_efunc_scalar_args) / (z + 1.0) def lookback_time_integrand(self, z): """Integrand of the lookback time (equation 30 of [1]_). Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- I : float or array The integrand for the lookback time. References ---------- .. [1] Hogg, D. (1999). Distance measures in cosmology, section 11. arXiv e-prints, astro-ph/9905116. """ z = aszarr(z) return self.inv_efunc(z) / (z + 1.0) def _abs_distance_integrand_scalar(self, z): """Integrand of the absorption distance [1]_. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- X : float The integrand for the absorption distance. References ---------- .. [1] Hogg, D. (1999). Distance measures in cosmology, section 11. arXiv e-prints, astro-ph/9905116. """ args = self._inv_efunc_scalar_args return (z + 1.0) ** 2 * self._inv_efunc_scalar(z, *args) def abs_distance_integrand(self, z): """Integrand of the absorption distance [1]_. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- X : float or array The integrand for the absorption distance. References ---------- .. [1] Hogg, D. (1999). Distance measures in cosmology, section 11. arXiv e-prints, astro-ph/9905116. """ z = aszarr(z) return (z + 1.0) ** 2 * self.inv_efunc(z) def H(self, z): """Hubble parameter (km/s/Mpc) at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- H : `~astropy.units.Quantity` ['frequency'] Hubble parameter at each input redshift. """ return self._H0 * self.efunc(z) def scale_factor(self, z): """Scale factor at redshift ``z``. The scale factor is defined as :math:`a = 1 / (1 + z)`. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- a : ndarray or float Scale factor at each input redshift. Returns `float` if the input is scalar. """ return 1.0 / (aszarr(z) + 1.0) def lookback_time(self, z): """Lookback time in Gyr to redshift ``z``. The lookback time is the difference between the age of the Universe now and the age at redshift ``z``. 
        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        t : `~astropy.units.Quantity` ['time']
            Lookback time in Gyr to each input redshift.

        See Also
        --------
        z_at_value : Find the redshift corresponding to a lookback time.
        """
        return self._lookback_time(z)

    def _lookback_time(self, z):
        """Lookback time in Gyr to redshift ``z``.

        The lookback time is the difference between the age of the Universe
        now and the age at redshift ``z``.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        t : `~astropy.units.Quantity` ['time']
            Lookback time in Gyr to each input redshift.
        """
        return self._hubble_time * self._integral_lookback_time(z)

    @vectorize_redshift_method
    def _integral_lookback_time(self, z, /):
        """Lookback time to redshift ``z``. Value in units of Hubble time.

        The lookback time is the difference between the age of the Universe
        now and the age at redshift ``z``.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        t : float or ndarray
            Lookback time to each input redshift in Hubble time units.
            Returns `float` if input scalar, `~numpy.ndarray` otherwise.
        """
        return quad(self._lookback_time_integrand_scalar, 0, z)[0]

    def lookback_distance(self, z):
        """
        The lookback distance is the light travel time distance to a given
        redshift. It is simply c * lookback_time. It may be used to calculate
        the proper distance between two redshifts, e.g. for the mean free
        path to ionizing radiation.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        d : `~astropy.units.Quantity` ['length']
            Lookback distance in Mpc
        """
        return (self.lookback_time(z) * const.c).to(u.Mpc)

    def age(self, z):
        """Age of the universe in Gyr at redshift ``z``.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        t : `~astropy.units.Quantity` ['time']
            The age of the universe in Gyr at each input redshift.

        See Also
        --------
        z_at_value : Find the redshift corresponding to an age.
        """
        return self._age(z)

    def _age(self, z):
        """Age of the universe in Gyr at redshift ``z``.

        This internal function exists to be re-defined for optimizations.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        t : `~astropy.units.Quantity` ['time']
            The age of the universe in Gyr at each input redshift.
        """
        return self._hubble_time * self._integral_age(z)

    @vectorize_redshift_method
    def _integral_age(self, z, /):
        """Age of the universe at redshift ``z``. Value in units of Hubble
        time.

        Calculated using explicit integration.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        t : float or ndarray
            The age of the universe at each input redshift in Hubble time
            units. Returns `float` if input scalar, `~numpy.ndarray`
            otherwise.

        See Also
        --------
        z_at_value : Find the redshift corresponding to an age.
        """
        return quad(self._lookback_time_integrand_scalar, z, inf)[0]

    def critical_density(self, z):
        """Critical density in grams per cubic cm at redshift ``z``.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        rho : `~astropy.units.Quantity`
            Critical density in g/cm^3 at each input redshift.
""" return self._critical_density0 * (self.efunc(z)) ** 2 def comoving_distance(self, z): """Comoving line-of-sight distance in Mpc at a given redshift. The comoving distance along the line-of-sight between two objects remains constant with time for objects in the Hubble flow. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- d : `~astropy.units.Quantity` ['length'] Comoving distance in Mpc to each input redshift. """ return self._comoving_distance_z1z2(0, z) def _comoving_distance_z1z2(self, z1, z2): """ Comoving line-of-sight distance in Mpc between objects at redshifts ``z1`` and ``z2``. The comoving distance along the line-of-sight between two objects remains constant with time for objects in the Hubble flow. Parameters ---------- z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshifts. Returns ------- d : `~astropy.units.Quantity` ['length'] Comoving distance in Mpc between each input redshift. """ return self._integral_comoving_distance_z1z2(z1, z2) @vectorize_redshift_method(nin=2) def _integral_comoving_distance_z1z2_scalar(self, z1, z2, /): """ Comoving line-of-sight distance between objects at redshifts ``z1`` and ``z2``. Value in Mpc. The comoving distance along the line-of-sight between two objects remains constant with time for objects in the Hubble flow. Parameters ---------- z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshifts. Returns ------- d : float or ndarray Comoving distance in Mpc between each input redshift. Returns `float` if input scalar, `~numpy.ndarray` otherwise. """ return quad(self._inv_efunc_scalar, z1, z2, args=self._inv_efunc_scalar_args)[0] def _integral_comoving_distance_z1z2(self, z1, z2): """ Comoving line-of-sight distance in Mpc between objects at redshifts ``z1`` and ``z2``. The comoving distance along the line-of-sight between two objects remains constant with time for objects in the Hubble flow. Parameters ---------- z1, z2 : Quantity-like ['redshift'] or array-like Input redshifts. Returns ------- d : `~astropy.units.Quantity` ['length'] Comoving distance in Mpc between each input redshift. """ return self._hubble_distance * self._integral_comoving_distance_z1z2_scalar(z1, z2) def comoving_transverse_distance(self, z): r"""Comoving transverse distance in Mpc at a given redshift. This value is the transverse comoving distance at redshift ``z`` corresponding to an angular separation of 1 radian. This is the same as the comoving distance if :math:`\Omega_k` is zero (as in the current concordance Lambda-CDM model). Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- d : `~astropy.units.Quantity` ['length'] Comoving transverse distance in Mpc at each input redshift. Notes ----- This quantity is also called the 'proper motion distance' in some texts. """ return self._comoving_transverse_distance_z1z2(0, z) def _comoving_transverse_distance_z1z2(self, z1, z2): r"""Comoving transverse distance in Mpc between two redshifts. This value is the transverse comoving distance at redshift ``z2`` as seen from redshift ``z1`` corresponding to an angular separation of 1 radian. This is the same as the comoving distance if :math:`\Omega_k` is zero (as in the current concordance Lambda-CDM model). Parameters ---------- z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshifts. 
        Returns
        -------
        d : `~astropy.units.Quantity` ['length']
            Comoving transverse distance in Mpc between the input redshifts.

        Notes
        -----
        This quantity is also called the 'proper motion distance' in some
        texts.
        """
        Ok0 = self._Ok0
        dc = self._comoving_distance_z1z2(z1, z2)
        if Ok0 == 0:
            return dc
        sqrtOk0 = sqrt(abs(Ok0))
        dh = self._hubble_distance
        if Ok0 > 0:
            return dh / sqrtOk0 * np.sinh(sqrtOk0 * dc.value / dh.value)
        else:
            return dh / sqrtOk0 * sin(sqrtOk0 * dc.value / dh.value)

    def angular_diameter_distance(self, z):
        """Angular diameter distance in Mpc at a given redshift.

        This gives the proper (sometimes called 'physical') transverse
        distance corresponding to an angle of 1 radian for an object at
        redshift ``z`` ([1]_, [2]_, [3]_).

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        d : `~astropy.units.Quantity` ['length']
            Angular diameter distance in Mpc at each input redshift.

        References
        ----------
        .. [1] Weinberg, 1972, pp 420-424; Weedman, 1986, pp 421-424.
        .. [2] Weedman, D. (1986). Quasar astronomy, pp 65-67.
        .. [3] Peebles, P. (1993). Principles of Physical Cosmology, pp 325-327.
        """
        z = aszarr(z)
        return self.comoving_transverse_distance(z) / (z + 1.0)

    def luminosity_distance(self, z):
        """Luminosity distance in Mpc at redshift ``z``.

        This is the distance to use when converting between the bolometric
        flux from an object at redshift ``z`` and its bolometric luminosity
        [1]_.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        d : `~astropy.units.Quantity` ['length']
            Luminosity distance in Mpc at each input redshift.

        See Also
        --------
        z_at_value : Find the redshift corresponding to a luminosity distance.

        References
        ----------
        .. [1] Weinberg, 1972, pp 420-424; Weedman, 1986, pp 60-62.
        """
        z = aszarr(z)
        return (z + 1.0) * self.comoving_transverse_distance(z)

    def angular_diameter_distance_z1z2(self, z1, z2):
        """Angular diameter distance between objects at 2 redshifts.

        Useful for gravitational lensing, for example computing the angular
        diameter distance between a lensed galaxy and the foreground lens.

        Parameters
        ----------
        z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshifts. For most practical applications such as
            gravitational lensing, ``z2`` should be larger than ``z1``. The
            method will work for ``z2 < z1``; however, this will return
            negative distances.

        Returns
        -------
        d : `~astropy.units.Quantity`
            The angular diameter distance between each input redshift pair.
            Returns scalar if input is scalar, array otherwise.
        """
        z1, z2 = aszarr(z1), aszarr(z2)
        if np.any(z2 < z1):
            warnings.warn(f"Second redshift(s) z2 ({z2}) is less than first "
                          f"redshift(s) z1 ({z1}).", AstropyUserWarning)
        return self._comoving_transverse_distance_z1z2(z1, z2) / (z2 + 1.0)

    @vectorize_redshift_method
    def absorption_distance(self, z, /):
        """Absorption distance at redshift ``z``.

        This is used to calculate the number of objects with some cross
        section of absorption and number density intersecting a sightline
        per unit redshift path ([1]_, [2]_).

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        d : float or ndarray
            Absorption distance (dimensionless) at each input redshift.
            Returns `float` if input scalar, `~numpy.ndarray` otherwise.

        References
        ----------
        .. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
               arXiv e-prints, astro-ph/9905116.
        .. [2] Bahcall, John N.
               and Peebles, P.J.E. 1969, ApJ, 156L, 7B
        """
        return quad(self._abs_distance_integrand_scalar, 0, z)[0]

    def distmod(self, z):
        """Distance modulus at redshift ``z``.

        The distance modulus is defined as the (apparent magnitude - absolute
        magnitude) for an object at redshift ``z``.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        distmod : `~astropy.units.Quantity`
            Distance modulus at each input redshift, in magnitudes.

        See Also
        --------
        z_at_value : Find the redshift corresponding to a distance modulus.
        """
        # Remember that the luminosity distance is in Mpc
        # Abs is necessary because in certain obscure closed cosmologies
        # the distance modulus can be negative -- which is okay because
        # it enters as the square.
        val = 5. * np.log10(abs(self.luminosity_distance(z).value)) + 25.0
        return u.Quantity(val, u.mag)

    def comoving_volume(self, z):
        r"""Comoving volume in cubic Mpc at redshift ``z``.

        This is the volume of the universe encompassed by redshifts less than
        ``z``. For the case of :math:`\Omega_k = 0` it is a sphere of radius
        `comoving_distance` but it is less intuitive if :math:`\Omega_k` is
        not.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        V : `~astropy.units.Quantity`
            Comoving volume in :math:`Mpc^3` at each input redshift.
        """
        Ok0 = self._Ok0
        if Ok0 == 0:
            return 4.0 / 3.0 * pi * self.comoving_distance(z) ** 3

        dh = self._hubble_distance.value  # .value for speed
        dm = self.comoving_transverse_distance(z).value
        term1 = 4.0 * pi * dh ** 3 / (2.0 * Ok0) * u.Mpc ** 3
        term2 = dm / dh * np.sqrt(1 + Ok0 * (dm / dh) ** 2)
        term3 = sqrt(abs(Ok0)) * dm / dh

        if Ok0 > 0:
            return term1 * (term2 - 1. / sqrt(abs(Ok0)) * np.arcsinh(term3))
        else:
            return term1 * (term2 - 1. / sqrt(abs(Ok0)) * np.arcsin(term3))

    def differential_comoving_volume(self, z):
        """Differential comoving volume at redshift z.

        Useful for calculating the effective comoving volume.
        For example, allows for integration over a comoving volume that has a
        sensitivity function that changes with redshift. The total comoving
        volume is given by integrating ``differential_comoving_volume`` to
        redshift ``z`` and multiplying by a solid angle.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        dV : `~astropy.units.Quantity`
            Differential comoving volume per redshift per steradian at each
            input redshift.
        """
        dm = self.comoving_transverse_distance(z)
        return self._hubble_distance * (dm ** 2.0) / (self.efunc(z) << u.steradian)

    def kpc_comoving_per_arcmin(self, z):
        """
        Separation in transverse comoving kpc corresponding to an arcminute
        at redshift ``z``.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        d : `~astropy.units.Quantity` ['length']
            The distance in comoving kpc corresponding to an arcmin at each
            input redshift.
        """
        return self.comoving_transverse_distance(z).to(u.kpc) / radian_in_arcmin

    def kpc_proper_per_arcmin(self, z):
        """
        Separation in transverse proper kpc corresponding to an arcminute at
        redshift ``z``.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        d : `~astropy.units.Quantity` ['length']
            The distance in proper kpc corresponding to an arcmin at each
            input redshift.
""" return self.angular_diameter_distance(z).to(u.kpc) / radian_in_arcmin def arcsec_per_kpc_comoving(self, z): """ Angular separation in arcsec corresponding to a comoving kpc at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- theta : `~astropy.units.Quantity` ['angle'] The angular separation in arcsec corresponding to a comoving kpc at each input redshift. """ return radian_in_arcsec / self.comoving_transverse_distance(z).to(u.kpc) def arcsec_per_kpc_proper(self, z): """ Angular separation in arcsec corresponding to a proper kpc at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- theta : `~astropy.units.Quantity` ['angle'] The angular separation in arcsec corresponding to a proper kpc at each input redshift. """ return radian_in_arcsec / self.angular_diameter_distance(z).to(u.kpc) class FlatFLRWMixin(FlatCosmologyMixin): """ Mixin class for flat FLRW cosmologies. Do NOT instantiate directly. Must precede the base class in the multiple-inheritance so that this mixin's ``__init__`` proceeds the base class'. Note that all instances of ``FlatFLRWMixin`` are flat, but not all flat cosmologies are instances of ``FlatFLRWMixin``. As example, ``LambdaCDM`` **may** be flat (for the a specific set of parameter values), but ``FlatLambdaCDM`` **will** be flat. """ Ode0 = FLRW.Ode0.clone(derived=True) # same as FLRW, but now a derived param. def __init_subclass__(cls): super().__init_subclass__() if "Ode0" in cls._init_signature.parameters: raise TypeError("subclasses of `FlatFLRWMixin` cannot have `Ode0` in `__init__`") def __init__(self, *args, **kw): super().__init__(*args, **kw) # guaranteed not to have `Ode0` # Do some twiddling after the fact to get flatness self._Ok0 = 0.0 self._Ode0 = 1.0 - (self._Om0 + self._Ogamma0 + self._Onu0 + self._Ok0) @property def Otot0(self): """Omega total; the total density/critical density at z=0.""" return 1.0 def Otot(self, z): """The total density parameter at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshifts. Returns ------- Otot : ndarray or float Returns float if input scalar. Value of 1. """ return 1.0 if isinstance(z, (Number, np.generic)) else np.ones_like(z, subok=False) def __equiv__(self, other): """flat-FLRW equivalence. Use ``.is_equivalent()`` for actual check! Parameters ---------- other : `~astropy.cosmology.FLRW` subclass instance The object in which to compare. Returns ------- bool or `NotImplemented` `True` if 'other' is of the same class / non-flat class (e.g. ``FlatLambdaCDM`` and ``LambdaCDM``) has matching parameters and parameter values. `False` if 'other' is of the same class but has different parameters. `NotImplemented` otherwise. """ # check if case (1): same class & parameters if isinstance(other, FlatFLRWMixin): return super().__equiv__(other) # check cases (3, 4), if other is the non-flat version of this class # this makes the assumption that any further subclass of a flat cosmo # keeps the same physics. 
        comparable_classes = [c for c in self.__class__.mro()[1:]
                              if (issubclass(c, FLRW) and c is not FLRW)]
        if other.__class__ not in comparable_classes:
            return NotImplemented

        # Check for equivalent parameters: all parameters in 'other' must
        # match those in 'self', 'other' must have no extra parameters
        # (case (2)) except for 'Ode0', and 'other' must be flat.
        params_eq = (
            set(self.__all_parameters__) == set(other.__all_parameters__)  # no extra
            and all(np.all(getattr(self, k) == getattr(other, k))  # equal
                    for k in self.__parameters__)
            and other.is_flat
        )
        return params_eq
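
# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): the minimum needed for a concrete FLRW
# subclass is an implementation of the abstract ``w(z)``; everything else
# (efunc, distances, ages) then works through the numeric de_density_scale
# integral above. ``ConstantWCosmology`` is a hypothetical demo class, not
# part of astropy, and the integrals need scipy at runtime.
if __name__ == "__main__":
    class ConstantWCosmology(FLRW):
        """Toy cosmology with w(z) = -0.9 at every redshift."""

        def w(self, z):
            z = aszarr(z)
            return -0.9 * (np.ones(z.shape) if hasattr(z, "shape") else 1.0)

    demo = ConstantWCosmology(H0=70, Om0=0.3, Ode0=0.7)
    print(demo.w(0.5))                  # -0.9
    print(demo.de_density_scale(0.5))   # numeric quad() over eq. (5), Linder 2003
    print(demo.comoving_distance(0.5))  # falls back to the generic integral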
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import numpy as np
from numpy import sqrt

import astropy.units as u
from astropy.cosmology.parameter import Parameter
from astropy.cosmology.utils import aszarr

from . import scalar_inv_efuncs
from .base import FLRW, FlatFLRWMixin

__all__ = ["wCDM", "FlatwCDM"]

__doctest_requires__ = {'*': ['scipy']}


class wCDM(FLRW):
    """
    FLRW cosmology with a constant dark energy equation of state and
    curvature.

    This has one additional attribute beyond those of FLRW.

    Parameters
    ----------
    H0 : float or scalar quantity-like ['frequency']
        Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].

    Om0 : float
        Omega matter: density of non-relativistic matter in units of the
        critical density at z=0.

    Ode0 : float
        Omega dark energy: density of dark energy in units of the critical
        density at z=0.

    w0 : float, optional
        Dark energy equation of state at all redshifts. This is
        pressure/density for dark energy in units where c=1. A cosmological
        constant has w0=-1.0.

    Tcmb0 : float or scalar quantity-like ['temperature'], optional
        Temperature of the CMB at z=0. If a float, must be in [K].
        Default: 0 [K]. Setting this to zero will turn off both photons
        and neutrinos (even massive ones).

    Neff : float, optional
        Effective number of Neutrino species. Default 3.04.

    m_nu : quantity-like ['energy', 'mass'] or array-like, optional
        Mass of each neutrino species in [eV] (mass-energy equivalency
        enabled). If this is a scalar Quantity, then all neutrino species are
        assumed to have that mass. Otherwise, the mass of each species. The
        actual number of neutrino species (and hence the number of elements
        of m_nu if it is not scalar) must be the floor of Neff. Typically
        this means you should provide three neutrino masses unless you are
        considering something like a sterile neutrino.

    Ob0 : float or None, optional
        Omega baryons: density of baryonic matter in units of the critical
        density at z=0. If this is set to None (the default), any computation
        that requires its value will raise an exception.

    name : str or None (optional, keyword-only)
        Name for this cosmological object.

    meta : mapping or None (optional, keyword-only)
        Metadata for the cosmology, e.g., a reference.

    Examples
    --------
    >>> from astropy.cosmology import wCDM
    >>> cosmo = wCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9)

    The comoving distance in Mpc at redshift z:

    >>> z = 0.5
    >>> dc = cosmo.comoving_distance(z)
    """

    w0 = Parameter(doc="Dark energy equation of state.", fvalidate="float")

    def __init__(self, H0, Om0, Ode0, w0=-1.0, Tcmb0=0.0*u.K, Neff=3.04,
                 m_nu=0.0*u.eV, Ob0=None, *, name=None, meta=None):
        super().__init__(H0=H0, Om0=Om0, Ode0=Ode0, Tcmb0=Tcmb0, Neff=Neff,
                         m_nu=m_nu, Ob0=Ob0, name=name, meta=meta)
        self.w0 = w0

        # Please see :ref:`astropy-cosmology-fast-integrals` for discussion
        # about what is being done here.
        if self._Tcmb0.value == 0:
            self._inv_efunc_scalar = scalar_inv_efuncs.wcdm_inv_efunc_norel
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
                                           self._w0)
        elif not self._massivenu:
            self._inv_efunc_scalar = scalar_inv_efuncs.wcdm_inv_efunc_nomnu
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
                                           self._Ogamma0 + self._Onu0,
                                           self._w0)
        else:
            self._inv_efunc_scalar = scalar_inv_efuncs.wcdm_inv_efunc
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
                                           self._Ogamma0, self._neff_per_nu,
                                           self._nmasslessnu, self._nu_y_list,
                                           self._w0)

    def w(self, z):
        r"""Returns dark energy equation of state at redshift ``z``.
        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        w : ndarray or float
            The dark energy equation of state.
            Returns `float` if the input is scalar.

        Notes
        -----
        The dark energy equation of state is defined as
        :math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at
        redshift z and :math:`\rho(z)` is the density at redshift z, both in
        units where c=1. Here this is :math:`w(z) = w_0`.
        """
        z = aszarr(z)
        return self._w0 * (np.ones(z.shape) if hasattr(z, "shape") else 1.0)

    def de_density_scale(self, z):
        r"""Evaluates the redshift dependence of the dark energy density.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        I : ndarray or float
            The scaling of the energy density of dark energy with redshift.
            Returns `float` if the input is scalar.

        Notes
        -----
        The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
        and in this case is given by
        :math:`I = \left(1 + z\right)^{3\left(1 + w_0\right)}`
        """
        return (aszarr(z) + 1.0) ** (3.0 * (1. + self._w0))

    def efunc(self, z):
        """Function used to calculate H(z), the Hubble parameter.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        E : ndarray or float
            The redshift scaling of the Hubble constant.
            Returns `float` if the input is scalar.
            Defined such that :math:`H(z) = H_0 E(z)`.
        """
        Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
                              else self._Ogamma0 * self.nu_relative_density(z))
        zp1 = aszarr(z) + 1.0  # (converts z [unit] -> z [dimensionless])

        return sqrt(zp1 ** 2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0) +
                    self._Ode0 * zp1 ** (3. * (1. + self._w0)))

    def inv_efunc(self, z):
        r"""Function used to calculate :math:`\frac{1}{H_z}`.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        E : ndarray or float
            The inverse redshift scaling of the Hubble constant.
            Returns `float` if the input is scalar.
            Defined such that :math:`H_z = H_0 / E`.
        """
        Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
                              else self._Ogamma0 * self.nu_relative_density(z))
        zp1 = aszarr(z) + 1.0  # (converts z [unit] -> z [dimensionless])

        return (zp1 ** 2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0) +
                self._Ode0 * zp1 ** (3. * (1. + self._w0)))**(-0.5)


class FlatwCDM(FlatFLRWMixin, wCDM):
    """
    FLRW cosmology with a constant dark energy equation of state and no
    spatial curvature.

    This has one additional attribute beyond those of FLRW.

    Parameters
    ----------
    H0 : float or scalar quantity-like ['frequency']
        Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].

    Om0 : float
        Omega matter: density of non-relativistic matter in units of the
        critical density at z=0.

    w0 : float, optional
        Dark energy equation of state at all redshifts. This is
        pressure/density for dark energy in units where c=1. A cosmological
        constant has w0=-1.0.

    Tcmb0 : float or scalar quantity-like ['temperature'], optional
        Temperature of the CMB at z=0. If a float, must be in [K].
        Default: 0 [K]. Setting this to zero will turn off both photons
        and neutrinos (even massive ones).

    Neff : float, optional
        Effective number of Neutrino species. Default 3.04.

    m_nu : quantity-like ['energy', 'mass'] or array-like, optional
        Mass of each neutrino species in [eV] (mass-energy equivalency
        enabled). If this is a scalar Quantity, then all neutrino species are
        assumed to have that mass. Otherwise, the mass of each species.
        The actual number of neutrino species (and hence the number of
        elements of m_nu if it is not scalar) must be the floor of Neff.
        Typically this means you should provide three neutrino masses unless
        you are considering something like a sterile neutrino.

    Ob0 : float or None, optional
        Omega baryons: density of baryonic matter in units of the critical
        density at z=0. If this is set to None (the default), any computation
        that requires its value will raise an exception.

    name : str or None (optional, keyword-only)
        Name for this cosmological object.

    meta : mapping or None (optional, keyword-only)
        Metadata for the cosmology, e.g., a reference.

    Examples
    --------
    >>> from astropy.cosmology import FlatwCDM
    >>> cosmo = FlatwCDM(H0=70, Om0=0.3, w0=-0.9)

    The comoving distance in Mpc at redshift z:

    >>> z = 0.5
    >>> dc = cosmo.comoving_distance(z)
    """

    def __init__(self, H0, Om0, w0=-1.0, Tcmb0=0.0*u.K, Neff=3.04,
                 m_nu=0.0*u.eV, Ob0=None, *, name=None, meta=None):
        super().__init__(H0=H0, Om0=Om0, Ode0=0.0, w0=w0, Tcmb0=Tcmb0,
                         Neff=Neff, m_nu=m_nu, Ob0=Ob0, name=name, meta=meta)

        # Please see :ref:`astropy-cosmology-fast-integrals` for discussion
        # about what is being done here.
        if self._Tcmb0.value == 0:
            self._inv_efunc_scalar = scalar_inv_efuncs.fwcdm_inv_efunc_norel
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
                                           self._w0)
        elif not self._massivenu:
            self._inv_efunc_scalar = scalar_inv_efuncs.fwcdm_inv_efunc_nomnu
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
                                           self._Ogamma0 + self._Onu0,
                                           self._w0)
        else:
            self._inv_efunc_scalar = scalar_inv_efuncs.fwcdm_inv_efunc
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
                                           self._Ogamma0, self._neff_per_nu,
                                           self._nmasslessnu, self._nu_y_list,
                                           self._w0)

    def efunc(self, z):
        """Function used to calculate H(z), the Hubble parameter.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        E : ndarray or float
            The redshift scaling of the Hubble constant.
            Returns `float` if the input is scalar.
            Defined such that :math:`H(z) = H_0 E(z)`.
        """
        Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
                              else self._Ogamma0 * self.nu_relative_density(z))
        zp1 = aszarr(z) + 1.0  # (converts z [unit] -> z [dimensionless])

        return sqrt(zp1 ** 3 * (Or * zp1 + self._Om0) +
                    self._Ode0 * zp1 ** (3.0 * (1 + self._w0)))

    def inv_efunc(self, z):
        r"""Function used to calculate :math:`\frac{1}{H_z}`.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        E : ndarray or float
            The inverse redshift scaling of the Hubble constant.
            Returns `float` if the input is scalar.
            Defined such that :math:`H_z = H_0 / E`.
        """
        Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
                              else self._Ogamma0 * self.nu_relative_density(z))
        zp1 = aszarr(z) + 1.0  # (converts z [unit] -> z [dimensionless])

        return (zp1 ** 3 * (Or * zp1 + self._Om0) +
                self._Ode0 * zp1 ** (3. * (1. + self._w0)))**(-0.5)
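
# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): FlatwCDM fixes Ok0 = 0 and derives Ode0,
# so a wCDM constructed with that same Ode0 should give the same expansion
# history. The parameter values are arbitrary demo assumptions.
if __name__ == "__main__":
    flat = FlatwCDM(H0=70, Om0=0.3, w0=-0.9)
    open_like = wCDM(H0=70, Om0=0.3, Ode0=flat.Ode0, w0=-0.9)
    for z in (0.5, 1.0, 2.0):
        print(z, flat.efunc(z), open_like.efunc(z))  # should agree
    # The flat/non-flat equivalence machinery in FlatFLRWMixin.__equiv__
    # should recognize these as the same physics:
    print(flat.is_equivalent(open_like))  # expected True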
# Licensed under a 3-clause BSD style license - see LICENSE.rst from math import acos, cos, inf, sin, sqrt from numbers import Number import numpy as np from numpy import log import astropy.units as u from astropy.cosmology.utils import aszarr from astropy.utils.compat.optional_deps import HAS_SCIPY from . import scalar_inv_efuncs from .base import FLRW, FlatFLRWMixin # isort: split if HAS_SCIPY: from scipy.special import ellipkinc, hyp2f1 else: def ellipkinc(*args, **kwargs): raise ModuleNotFoundError("No module named 'scipy.special'") def hyp2f1(*args, **kwargs): raise ModuleNotFoundError("No module named 'scipy.special'") __all__ = ["LambdaCDM", "FlatLambdaCDM"] __doctest_requires__ = {'*': ['scipy']} class LambdaCDM(FLRW): """FLRW cosmology with a cosmological constant and curvature. This has no additional attributes beyond those of FLRW. Parameters ---------- H0 : float or scalar quantity-like ['frequency'] Hubble constant at z = 0. If a float, must be in [km/sec/Mpc]. Om0 : float Omega matter: density of non-relativistic matter in units of the critical density at z=0. Ode0 : float Omega dark energy: density of the cosmological constant in units of the critical density at z=0. Tcmb0 : float or scalar quantity-like ['temperature'], optional Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K]. Setting this to zero will turn off both photons and neutrinos (even massive ones). Neff : float, optional Effective number of Neutrino species. Default 3.04. m_nu : quantity-like ['energy', 'mass'] or array-like, optional Mass of each neutrino species in [eV] (mass-energy equivalency enabled). If this is a scalar Quantity, then all neutrino species are assumed to have that mass. Otherwise, the mass of each species. The actual number of neutrino species (and hence the number of elements of m_nu if it is not scalar) must be the floor of Neff. Typically this means you should provide three neutrino masses unless you are considering something like a sterile neutrino. Ob0 : float or None, optional Omega baryons: density of baryonic matter in units of the critical density at z=0. If this is set to None (the default), any computation that requires its value will raise an exception. name : str or None (optional, keyword-only) Name for this cosmological object. meta : mapping or None (optional, keyword-only) Metadata for the cosmology, e.g., a reference. Examples -------- >>> from astropy.cosmology import LambdaCDM >>> cosmo = LambdaCDM(H0=70, Om0=0.3, Ode0=0.7) The comoving distance in Mpc at redshift z: >>> z = 0.5 >>> dc = cosmo.comoving_distance(z) """ def __init__(self, H0, Om0, Ode0, Tcmb0=0.0*u.K, Neff=3.04, m_nu=0.0*u.eV, Ob0=None, *, name=None, meta=None): super().__init__(H0=H0, Om0=Om0, Ode0=Ode0, Tcmb0=Tcmb0, Neff=Neff, m_nu=m_nu, Ob0=Ob0, name=name, meta=meta) # Please see :ref:`astropy-cosmology-fast-integrals` for discussion # about what is being done here. 
if self._Tcmb0.value == 0: self._inv_efunc_scalar = scalar_inv_efuncs.lcdm_inv_efunc_norel self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0) if self._Ok0 == 0: self._optimize_flat_norad() else: self._comoving_distance_z1z2 = self._elliptic_comoving_distance_z1z2 elif not self._massivenu: self._inv_efunc_scalar = scalar_inv_efuncs.lcdm_inv_efunc_nomnu self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, self._Ogamma0 + self._Onu0) else: self._inv_efunc_scalar = scalar_inv_efuncs.lcdm_inv_efunc self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, self._Ogamma0, self._neff_per_nu, self._nmasslessnu, self._nu_y_list) def _optimize_flat_norad(self): """Set optimizations for flat LCDM cosmologies with no radiation.""" # Call out the Om0=0 (de Sitter) and Om0=1 (Einstein-de Sitter) # The dS case is required because the hypergeometric case # for Omega_M=0 would lead to an infinity in its argument. # The EdS case is three times faster than the hypergeometric. if self._Om0 == 0: self._comoving_distance_z1z2 = self._dS_comoving_distance_z1z2 self._age = self._dS_age self._lookback_time = self._dS_lookback_time elif self._Om0 == 1: self._comoving_distance_z1z2 = self._EdS_comoving_distance_z1z2 self._age = self._EdS_age self._lookback_time = self._EdS_lookback_time else: self._comoving_distance_z1z2 = self._hypergeometric_comoving_distance_z1z2 self._age = self._flat_age self._lookback_time = self._flat_lookback_time def w(self, z): r"""Returns dark energy equation of state at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- w : ndarray or float The dark energy equation of state. Returns `float` if the input is scalar. Notes ----- The dark energy equation of state is defined as :math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at redshift z and :math:`\rho(z)` is the density at redshift z, both in units where c=1. Here this is :math:`w(z) = -1`. """ z = aszarr(z) return -1.0 * (np.ones(z.shape) if hasattr(z, "shape") else 1.0) def de_density_scale(self, z): r"""Evaluates the redshift dependence of the dark energy density. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- I : ndarray or float The scaling of the energy density of dark energy with redshift. Returns `float` if the input is scalar. Notes ----- The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`, and in this case is given by :math:`I = 1`. """ z = aszarr(z) return np.ones(z.shape) if hasattr(z, "shape") else 1.0 def _elliptic_comoving_distance_z1z2(self, z1, z2): r"""Comoving transverse distance in Mpc between two redshifts. This value is the transverse comoving distance at redshift ``z`` corresponding to an angular separation of 1 radian. This is the same as the comoving distance if :math:`\Omega_k` is zero. For :math:`\Omega_{rad} = 0` the comoving distance can be directly calculated as an elliptic integral [1]_. Not valid or appropriate for flat cosmologies (Ok0=0). Parameters ---------- z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshifts. Returns ------- d : `~astropy.units.Quantity` ['length'] Comoving distance in Mpc between each input redshift. References ---------- .. [1] Kantowski, R., Kao, J., & Thomas, R. (2000). Distance-Redshift in Inhomogeneous FLRW. arXiv e-prints, astro-ph/0002334. 
""" try: z1, z2 = np.broadcast_arrays(z1, z2) except ValueError as e: raise ValueError("z1 and z2 have different shapes") from e # The analytic solution is not valid for any of Om0, Ode0, Ok0 == 0. # Use the explicit integral solution for these cases. if self._Om0 == 0 or self._Ode0 == 0 or self._Ok0 == 0: return self._integral_comoving_distance_z1z2(z1, z2) b = -(27. / 2) * self._Om0**2 * self._Ode0 / self._Ok0**3 kappa = b / abs(b) if (b < 0) or (2 < b): def phi_z(Om0, Ok0, kappa, y1, A, z): return np.arccos(((z + 1.0) * Om0 / abs(Ok0) + kappa * y1 - A) / ((z + 1.0) * Om0 / abs(Ok0) + kappa * y1 + A)) v_k = pow(kappa * (b - 1) + sqrt(b * (b - 2)), 1. / 3) y1 = (-1 + kappa * (v_k + 1 / v_k)) / 3 A = sqrt(y1 * (3 * y1 + 2)) g = 1 / sqrt(A) k2 = (2 * A + kappa * (1 + 3 * y1)) / (4 * A) phi_z1 = phi_z(self._Om0, self._Ok0, kappa, y1, A, z1) phi_z2 = phi_z(self._Om0, self._Ok0, kappa, y1, A, z2) # Get lower-right 0<b<2 solution in Om0, Ode0 plane. # Fot the upper-left 0<b<2 solution the Big Bang didn't happen. elif (0 < b) and (b < 2) and self._Om0 > self._Ode0: def phi_z(Om0, Ok0, y1, y2, z): return np.arcsin(np.sqrt((y1 - y2) / ((z + 1.0) * Om0 / abs(Ok0) + y1))) yb = cos(acos(1 - b) / 3) yc = sqrt(3) * sin(acos(1 - b) / 3) y1 = (1. / 3) * (-1 + yb + yc) y2 = (1. / 3) * (-1 - 2 * yb) y3 = (1. / 3) * (-1 + yb - yc) g = 2 / sqrt(y1 - y2) k2 = (y1 - y3) / (y1 - y2) phi_z1 = phi_z(self._Om0, self._Ok0, y1, y2, z1) phi_z2 = phi_z(self._Om0, self._Ok0, y1, y2, z2) else: return self._integral_comoving_distance_z1z2(z1, z2) prefactor = self._hubble_distance / sqrt(abs(self._Ok0)) return prefactor * g * (ellipkinc(phi_z1, k2) - ellipkinc(phi_z2, k2)) def _dS_comoving_distance_z1z2(self, z1, z2): r""" Comoving line-of-sight distance in Mpc between objects at redshifts ``z1`` and ``z2`` in a flat, :math:`\Omega_{\Lambda}=1` cosmology (de Sitter). The comoving distance along the line-of-sight between two objects remains constant with time for objects in the Hubble flow. The de Sitter case has an analytic solution. Parameters ---------- z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshifts. Must be 1D or scalar. Returns ------- d : `~astropy.units.Quantity` ['length'] Comoving distance in Mpc between each input redshift. """ try: z1, z2 = np.broadcast_arrays(z1, z2) except ValueError as e: raise ValueError("z1 and z2 have different shapes") from e return self._hubble_distance * (z2 - z1) def _EdS_comoving_distance_z1z2(self, z1, z2): r""" Comoving line-of-sight distance in Mpc between objects at redshifts ``z1`` and ``z2`` in a flat, :math:`\Omega_M=1` cosmology (Einstein - de Sitter). The comoving distance along the line-of-sight between two objects remains constant with time for objects in the Hubble flow. For :math:`\Omega_M=1`, :math:`\Omega_{rad}=0` the comoving distance has an analytic solution. Parameters ---------- z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshifts. Must be 1D or scalar. Returns ------- d : `~astropy.units.Quantity` ['length'] Comoving distance in Mpc between each input redshift. """ try: z1, z2 = np.broadcast_arrays(z1, z2) except ValueError as e: raise ValueError("z1 and z2 have different shapes") from e prefactor = 2 * self._hubble_distance return prefactor * ((z1 + 1.0)**(-1./2) - (z2 + 1.0)**(-1./2)) def _hypergeometric_comoving_distance_z1z2(self, z1, z2): r""" Comoving line-of-sight distance in Mpc between objects at redshifts ``z1`` and ``z2``. 
The comoving distance along the line-of-sight between two objects remains constant with time for objects in the Hubble flow. For :math:`\Omega_{rad} = 0` the comoving distance can be directly calculated as a hypergeometric function [1]_. Parameters ---------- z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshifts. Returns ------- d : `~astropy.units.Quantity` ['length'] Comoving distance in Mpc between each input redshift. References ---------- .. [1] Baes, M., Camps, P., & Van De Putte, D. (2017). Analytical expressions and numerical evaluation of the luminosity distance in a flat cosmology. MNRAS, 468(1), 927-930. """ try: z1, z2 = np.broadcast_arrays(z1, z2) except ValueError as e: raise ValueError("z1 and z2 have different shapes") from e s = ((1 - self._Om0) / self._Om0) ** (1./3) # Use np.sqrt here to handle negative s (Om0>1). prefactor = self._hubble_distance / np.sqrt(s * self._Om0) return prefactor * (self._T_hypergeometric(s / (z1 + 1.0)) - self._T_hypergeometric(s / (z2 + 1.0))) def _T_hypergeometric(self, x): r"""Compute value using Gauss Hypergeometric function 2F1. .. math:: T(x) = 2 \sqrt{x} \,{}_{2}F_{1}\left(\frac{1}{6}, \frac{1}{2}; \frac{7}{6}; -x^3 \right) Notes ----- The :func:`scipy.special.hyp2f1` code already implements the hypergeometric transformation suggested by Baes et al. [1]_ for use in actual numerical evaluations. References ---------- .. [1] Baes, M., Camps, P., & Van De Putte, D. (2017). Analytical expressions and numerical evaluation of the luminosity distance in a flat cosmology. MNRAS, 468(1), 927-930. """ return 2 * np.sqrt(x) * hyp2f1(1./6, 1./2, 7./6, -x**3) def _dS_age(self, z): """Age of the universe in Gyr at redshift ``z``. The age of a de Sitter Universe is infinite. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- t : `~astropy.units.Quantity` ['time'] The age of the universe in Gyr at each input redshift. """ t = (inf if isinstance(z, Number) else np.full_like(z, inf, dtype=float)) return self._hubble_time * t def _EdS_age(self, z): r"""Age of the universe in Gyr at redshift ``z``. For :math:`\Omega_{rad} = 0` (:math:`T_{CMB} = 0`; massless neutrinos) the age can be directly calculated as an elliptic integral [1]_. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- t : `~astropy.units.Quantity` ['time'] The age of the universe in Gyr at each input redshift. References ---------- .. [1] Thomas, R., & Kantowski, R. (2000). Age-redshift relation for standard cosmology. PRD, 62(10), 103507. """ return (2./3) * self._hubble_time * (aszarr(z) + 1.0) ** (-1.5) def _flat_age(self, z): r"""Age of the universe in Gyr at redshift ``z``. For :math:`\Omega_{rad} = 0` (:math:`T_{CMB} = 0`; massless neutrinos) the age can be directly calculated as an elliptic integral [1]_. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- t : `~astropy.units.Quantity` ['time'] The age of the universe in Gyr at each input redshift. References ---------- .. [1] Thomas, R., & Kantowski, R. (2000). Age-redshift relation for standard cosmology. PRD, 62(10), 103507.
""" # Use np.sqrt, np.arcsinh instead of math.sqrt, math.asinh # to handle properly the complex numbers for 1 - Om0 < 0 prefactor = (2./3) * self._hubble_time / np.emath.sqrt(1 - self._Om0) arg = np.arcsinh(np.emath.sqrt((1 / self._Om0 - 1 + 0j) / (aszarr(z) + 1.0)**3)) return (prefactor * arg).real def _EdS_lookback_time(self, z): r"""Lookback time in Gyr to redshift ``z``. The lookback time is the difference between the age of the Universe now and the age at redshift ``z``. For :math:`\Omega_{rad} = 0` (:math:`T_{CMB} = 0`; massless neutrinos) the age can be directly calculated as an elliptic integral. The lookback time is here calculated based on the ``age(0) - age(z)``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- t : `~astropy.units.Quantity` ['time'] Lookback time in Gyr to each input redshift. """ return self._EdS_age(0) - self._EdS_age(z) def _dS_lookback_time(self, z): r"""Lookback time in Gyr to redshift ``z``. The lookback time is the difference between the age of the Universe now and the age at redshift ``z``. For :math:`\Omega_{rad} = 0` (:math:`T_{CMB} = 0`; massless neutrinos) the age can be directly calculated. .. math:: a = exp(H * t) \ \text{where t=0 at z=0} t = (1/H) (ln 1 - ln a) = (1/H) (0 - ln (1/(1+z))) = (1/H) ln(1+z) Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- t : `~astropy.units.Quantity` ['time'] Lookback time in Gyr to each input redshift. """ return self._hubble_time * log(aszarr(z) + 1.0) def _flat_lookback_time(self, z): r"""Lookback time in Gyr to redshift ``z``. The lookback time is the difference between the age of the Universe now and the age at redshift ``z``. For :math:`\Omega_{rad} = 0` (:math:`T_{CMB} = 0`; massless neutrinos) the age can be directly calculated. The lookback time is here calculated based on the ``age(0) - age(z)``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- t : `~astropy.units.Quantity` ['time'] Lookback time in Gyr to each input redshift. """ return self._flat_age(0) - self._flat_age(z) def efunc(self, z): """Function used to calculate H(z), the Hubble parameter. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- E : ndarray or float The redshift scaling of the Hubble constant. Returns `float` if the input is scalar. Defined such that :math:`H(z) = H_0 E(z)`. """ # We override this because it takes a particularly simple # form for a cosmological constant Or = self._Ogamma0 + (self._Onu0 if not self._massivenu else self._Ogamma0 * self.nu_relative_density(z)) zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless]) return np.sqrt(zp1 ** 2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0) + self._Ode0) def inv_efunc(self, z): r"""Function used to calculate :math:`\frac{1}{H_z}`. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- E : ndarray or float The inverse redshift scaling of the Hubble constant. Returns `float` if the input is scalar. Defined such that :math:`H_z = H_0 / E`. 
""" Or = self._Ogamma0 + (self._Onu0 if not self._massivenu else self._Ogamma0 * self.nu_relative_density(z)) zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless]) return (zp1 ** 2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0) + self._Ode0)**(-0.5) class FlatLambdaCDM(FlatFLRWMixin, LambdaCDM): """FLRW cosmology with a cosmological constant and no curvature. This has no additional attributes beyond those of FLRW. Parameters ---------- H0 : float or scalar quantity-like ['frequency'] Hubble constant at z = 0. If a float, must be in [km/sec/Mpc]. Om0 : float Omega matter: density of non-relativistic matter in units of the critical density at z=0. Tcmb0 : float or scalar quantity-like ['temperature'], optional Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K]. Setting this to zero will turn off both photons and neutrinos (even massive ones). Neff : float, optional Effective number of Neutrino species. Default 3.04. m_nu : quantity-like ['energy', 'mass'] or array-like, optional Mass of each neutrino species in [eV] (mass-energy equivalency enabled). If this is a scalar Quantity, then all neutrino species are assumed to have that mass. Otherwise, the mass of each species. The actual number of neutrino species (and hence the number of elements of m_nu if it is not scalar) must be the floor of Neff. Typically this means you should provide three neutrino masses unless you are considering something like a sterile neutrino. Ob0 : float or None, optional Omega baryons: density of baryonic matter in units of the critical density at z=0. If this is set to None (the default), any computation that requires its value will raise an exception. name : str or None (optional, keyword-only) Name for this cosmological object. meta : mapping or None (optional, keyword-only) Metadata for the cosmology, e.g., a reference. Examples -------- >>> from astropy.cosmology import FlatLambdaCDM >>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3) The comoving distance in Mpc at redshift z: >>> z = 0.5 >>> dc = cosmo.comoving_distance(z) """ def __init__(self, H0, Om0, Tcmb0=0.0*u.K, Neff=3.04, m_nu=0.0*u.eV, Ob0=None, *, name=None, meta=None): super().__init__(H0=H0, Om0=Om0, Ode0=0.0, Tcmb0=Tcmb0, Neff=Neff, m_nu=m_nu, Ob0=Ob0, name=name, meta=meta) # Please see :ref:`astropy-cosmology-fast-integrals` for discussion # about what is being done here. if self._Tcmb0.value == 0: self._inv_efunc_scalar = scalar_inv_efuncs.flcdm_inv_efunc_norel self._inv_efunc_scalar_args = (self._Om0, self._Ode0) # Repeat the optimization reassignments here because the init # of the LambaCDM above didn't actually create a flat cosmology. # That was done through the explicit tweak setting self._Ok0. self._optimize_flat_norad() elif not self._massivenu: self._inv_efunc_scalar = scalar_inv_efuncs.flcdm_inv_efunc_nomnu self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ogamma0 + self._Onu0) else: self._inv_efunc_scalar = scalar_inv_efuncs.flcdm_inv_efunc self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ogamma0, self._neff_per_nu, self._nmasslessnu, self._nu_y_list) def efunc(self, z): """Function used to calculate H(z), the Hubble parameter. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- E : ndarray or float The redshift scaling of the Hubble constant. Returns `float` if the input is scalar. Defined such that :math:`H(z) = H_0 E(z)`. 
""" # We override this because it takes a particularly simple # form for a cosmological constant Or = self._Ogamma0 + (self._Onu0 if not self._massivenu else self._Ogamma0 * self.nu_relative_density(z)) zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless]) return np.sqrt(zp1 ** 3 * (Or * zp1 + self._Om0) + self._Ode0) def inv_efunc(self, z): r"""Function used to calculate :math:`\frac{1}{H_z}`. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- E : ndarray or float The inverse redshift scaling of the Hubble constant. Returns `float` if the input is scalar. Defined such that :math:`H_z = H_0 / E`. """ Or = self._Ogamma0 + (self._Onu0 if not self._massivenu else self._Ogamma0 * self.nu_relative_density(z)) zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless]) return (zp1 ** 3 * (Or * zp1 + self._Om0) + self._Ode0)**(-0.5)
# Licensed under a 3-clause BSD style license - see LICENSE.rst from numpy import exp import astropy.units as u from astropy.cosmology import units as cu from astropy.cosmology.parameter import Parameter from astropy.cosmology.utils import aszarr from . import scalar_inv_efuncs from .base import FLRW __all__ = ["wpwaCDM"] __doctest_requires__ = {'*': ['scipy']} class wpwaCDM(FLRW): r""" FLRW cosmology with a CPL dark energy equation of state, a pivot redshift, and curvature. The equation for the dark energy equation of state uses the CPL form as described in Chevallier & Polarski [1]_ and Linder [2]_, but modified to have a pivot redshift as in the findings of the Dark Energy Task Force [3]_: :math:`w(a) = w_p + w_a (a_p - a) = w_p + w_a( 1/(1+zp) - 1/(1+z) )`. Parameters ---------- H0 : float or scalar quantity-like ['frequency'] Hubble constant at z = 0. If a float, must be in [km/sec/Mpc]. Om0 : float Omega matter: density of non-relativistic matter in units of the critical density at z=0. Ode0 : float Omega dark energy: density of dark energy in units of the critical density at z=0. wp : float, optional Dark energy equation of state at the pivot redshift zp. This is pressure/density for dark energy in units where c=1. wa : float, optional Negative derivative of the dark energy equation of state with respect to the scale factor. A cosmological constant has wp=-1.0 and wa=0.0. zp : float or quantity-like ['redshift'], optional Pivot redshift -- the redshift where w(z) = wp Tcmb0 : float or scalar quantity-like ['temperature'], optional Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K]. Setting this to zero will turn off both photons and neutrinos (even massive ones). Neff : float, optional Effective number of Neutrino species. Default 3.04. m_nu : quantity-like ['energy', 'mass'] or array-like, optional Mass of each neutrino species in [eV] (mass-energy equivalency enabled). If this is a scalar Quantity, then all neutrino species are assumed to have that mass. Otherwise, the mass of each species. The actual number of neutrino species (and hence the number of elements of m_nu if it is not scalar) must be the floor of Neff. Typically this means you should provide three neutrino masses unless you are considering something like a sterile neutrino. Ob0 : float or None, optional Omega baryons: density of baryonic matter in units of the critical density at z=0. If this is set to None (the default), any computation that requires its value will raise an exception. name : str or None (optional, keyword-only) Name for this cosmological object. meta : mapping or None (optional, keyword-only) Metadata for the cosmology, e.g., a reference. Examples -------- >>> from astropy.cosmology import wpwaCDM >>> cosmo = wpwaCDM(H0=70, Om0=0.3, Ode0=0.7, wp=-0.9, wa=0.2, zp=0.4) The comoving distance in Mpc at redshift z: >>> z = 0.5 >>> dc = cosmo.comoving_distance(z) References ---------- .. [1] Chevallier, M., & Polarski, D. (2001). Accelerating Universes with Scaling Dark Matter. International Journal of Modern Physics D, 10(2), 213-223. .. [2] Linder, E. (2003). Exploring the Expansion History of the Universe. Phys. Rev. Lett., 90, 091301. .. [3] Albrecht, A., Amendola, L., Bernstein, G., Clowe, D., Eisenstein, D., Guzzo, L., Hirata, C., Huterer, D., Kirshner, R., Kolb, E., & Nichol, R. (2009). Findings of the Joint Dark Energy Mission Figure of Merit Science Working Group. arXiv e-prints, arXiv:0901.0721. 
""" wp = Parameter(doc="Dark energy equation of state at the pivot redshift zp.", fvalidate="float") wa = Parameter(doc="Negative derivative of dark energy equation of state w.r.t. a.", fvalidate="float") zp = Parameter(doc="The pivot redshift, where w(z) = wp.", unit=cu.redshift) def __init__(self, H0, Om0, Ode0, wp=-1.0, wa=0.0, zp=0.0 * cu.redshift, Tcmb0=0.0*u.K, Neff=3.04, m_nu=0.0*u.eV, Ob0=None, *, name=None, meta=None): super().__init__(H0=H0, Om0=Om0, Ode0=Ode0, Tcmb0=Tcmb0, Neff=Neff, m_nu=m_nu, Ob0=Ob0, name=name, meta=meta) self.wp = wp self.wa = wa self.zp = zp # Please see :ref:`astropy-cosmology-fast-integrals` for discussion # about what is being done here. apiv = 1.0 / (1.0 + self._zp.value) if self._Tcmb0.value == 0: self._inv_efunc_scalar = scalar_inv_efuncs.wpwacdm_inv_efunc_norel self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, self._wp, apiv, self._wa) elif not self._massivenu: self._inv_efunc_scalar = scalar_inv_efuncs.wpwacdm_inv_efunc_nomnu self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, self._Ogamma0 + self._Onu0, self._wp, apiv, self._wa) else: self._inv_efunc_scalar = scalar_inv_efuncs.wpwacdm_inv_efunc self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, self._Ogamma0, self._neff_per_nu, self._nmasslessnu, self._nu_y_list, self._wp, apiv, self._wa) def w(self, z): r"""Returns dark energy equation of state at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- w : ndarray or float The dark energy equation of state Returns `float` if the input is scalar. Notes ----- The dark energy equation of state is defined as :math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at redshift z and :math:`\rho(z)` is the density at redshift z, both in units where c=1. Here this is :math:`w(z) = w_p + w_a (a_p - a)` where :math:`a = 1/1+z` and :math:`a_p = 1 / 1 + z_p`. """ apiv = 1.0 / (1.0 + self._zp.value) return self._wp + self._wa * (apiv - 1.0 / (aszarr(z) + 1.0)) def de_density_scale(self, z): r"""Evaluates the redshift dependence of the dark energy density. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- I : ndarray or float The scaling of the energy density of dark energy with redshift. Returns `float` if the input is scalar. Notes ----- The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`, and in this case is given by .. math:: a_p = \frac{1}{1 + z_p} I = \left(1 + z\right)^{3 \left(1 + w_p + a_p w_a\right)} \exp \left(-3 w_a \frac{z}{1+z}\right) """ z = aszarr(z) zp1 = z + 1.0 # (converts z [unit] -> z [dimensionless]) apiv = 1. / (1. + self._zp.value) return zp1 ** (3. * (1. + self._wp + apiv * self._wa)) * exp(-3. * self._wa * z / zp1)
# Licensed under a 3-clause BSD style license - see LICENSE.rst from numpy import exp import astropy.units as u from astropy.cosmology.parameter import Parameter from astropy.cosmology.utils import aszarr from . import scalar_inv_efuncs from .base import FLRW, FlatFLRWMixin __all__ = ["w0waCDM", "Flatw0waCDM"] __doctest_requires__ = {'*': ['scipy']} class w0waCDM(FLRW): r"""FLRW cosmology with a CPL dark energy equation of state and curvature. The equation for the dark energy equation of state uses the CPL form as described in Chevallier & Polarski [1]_ and Linder [2]_: :math:`w(z) = w_0 + w_a (1-a) = w_0 + w_a z / (1+z)`. Parameters ---------- H0 : float or scalar quantity-like ['frequency'] Hubble constant at z = 0. If a float, must be in [km/sec/Mpc]. Om0 : float Omega matter: density of non-relativistic matter in units of the critical density at z=0. Ode0 : float Omega dark energy: density of dark energy in units of the critical density at z=0. w0 : float, optional Dark energy equation of state at z=0 (a=1). This is pressure/density for dark energy in units where c=1. wa : float, optional Negative derivative of the dark energy equation of state with respect to the scale factor. A cosmological constant has w0=-1.0 and wa=0.0. Tcmb0 : float or scalar quantity-like ['temperature'], optional Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K]. Setting this to zero will turn off both photons and neutrinos (even massive ones). Neff : float, optional Effective number of Neutrino species. Default 3.04. m_nu : quantity-like ['energy', 'mass'] or array-like, optional Mass of each neutrino species in [eV] (mass-energy equivalency enabled). If this is a scalar Quantity, then all neutrino species are assumed to have that mass. Otherwise, the mass of each species. The actual number of neutrino species (and hence the number of elements of m_nu if it is not scalar) must be the floor of Neff. Typically this means you should provide three neutrino masses unless you are considering something like a sterile neutrino. Ob0 : float or None, optional Omega baryons: density of baryonic matter in units of the critical density at z=0. If this is set to None (the default), any computation that requires its value will raise an exception. name : str or None (optional, keyword-only) Name for this cosmological object. meta : mapping or None (optional, keyword-only) Metadata for the cosmology, e.g., a reference. Examples -------- >>> from astropy.cosmology import w0waCDM >>> cosmo = w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.2) The comoving distance in Mpc at redshift z: >>> z = 0.5 >>> dc = cosmo.comoving_distance(z) References ---------- .. [1] Chevallier, M., & Polarski, D. (2001). Accelerating Universes with Scaling Dark Matter. International Journal of Modern Physics D, 10(2), 213-223. .. [2] Linder, E. (2003). Exploring the Expansion History of the Universe. Phys. Rev. Lett., 90, 091301. """ w0 = Parameter(doc="Dark energy equation of state at z=0.", fvalidate="float") wa = Parameter(doc="Negative derivative of dark energy equation of state w.r.t. a.", fvalidate="float") def __init__(self, H0, Om0, Ode0, w0=-1.0, wa=0.0, Tcmb0=0.0*u.K, Neff=3.04, m_nu=0.0*u.eV, Ob0=None, *, name=None, meta=None): super().__init__(H0=H0, Om0=Om0, Ode0=Ode0, Tcmb0=Tcmb0, Neff=Neff, m_nu=m_nu, Ob0=Ob0, name=name, meta=meta) self.w0 = w0 self.wa = wa # Please see :ref:`astropy-cosmology-fast-integrals` for discussion # about what is being done here. 
if self._Tcmb0.value == 0: self._inv_efunc_scalar = scalar_inv_efuncs.w0wacdm_inv_efunc_norel self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, self._w0, self._wa) elif not self._massivenu: self._inv_efunc_scalar = scalar_inv_efuncs.w0wacdm_inv_efunc_nomnu self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, self._Ogamma0 + self._Onu0, self._w0, self._wa) else: self._inv_efunc_scalar = scalar_inv_efuncs.w0wacdm_inv_efunc self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, self._Ogamma0, self._neff_per_nu, self._nmasslessnu, self._nu_y_list, self._w0, self._wa) def w(self, z): r"""Returns dark energy equation of state at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- w : ndarray or float The dark energy equation of state. Returns `float` if the input is scalar. Notes ----- The dark energy equation of state is defined as :math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at redshift z and :math:`\rho(z)` is the density at redshift z, both in units where c=1. Here this is :math:`w(z) = w_0 + w_a (1 - a) = w_0 + w_a \frac{z}{1+z}`. """ z = aszarr(z) return self._w0 + self._wa * z / (z + 1.0) def de_density_scale(self, z): r"""Evaluates the redshift dependence of the dark energy density. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- I : ndarray or float The scaling of the energy density of dark energy with redshift. Returns `float` if the input is scalar. Notes ----- The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`, and in this case is given by .. math:: I = \left(1 + z\right)^{3 \left(1 + w_0 + w_a\right)} \exp \left(-3 w_a \frac{z}{1+z}\right) """ z = aszarr(z) zp1 = z + 1.0 # (converts z [unit] -> z [dimensionless]) return zp1 ** (3 * (1 + self._w0 + self._wa)) * exp(-3 * self._wa * z / zp1) class Flatw0waCDM(FlatFLRWMixin, w0waCDM): """FLRW cosmology with a CPL dark energy equation of state and no curvature. The equation for the dark energy equation of state uses the CPL form as described in Chevallier & Polarski [1]_ and Linder [2]_: :math:`w(z) = w_0 + w_a (1-a) = w_0 + w_a z / (1+z)`. Parameters ---------- H0 : float or scalar quantity-like ['frequency'] Hubble constant at z = 0. If a float, must be in [km/sec/Mpc]. Om0 : float Omega matter: density of non-relativistic matter in units of the critical density at z=0. w0 : float, optional Dark energy equation of state at z=0 (a=1). This is pressure/density for dark energy in units where c=1. wa : float, optional Negative derivative of the dark energy equation of state with respect to the scale factor. A cosmological constant has w0=-1.0 and wa=0.0. Tcmb0 : float or scalar quantity-like ['temperature'], optional Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K]. Setting this to zero will turn off both photons and neutrinos (even massive ones). Neff : float, optional Effective number of Neutrino species. Default 3.04. m_nu : quantity-like ['energy', 'mass'] or array-like, optional Mass of each neutrino species in [eV] (mass-energy equivalency enabled). If this is a scalar Quantity, then all neutrino species are assumed to have that mass. Otherwise, the mass of each species. The actual number of neutrino species (and hence the number of elements of m_nu if it is not scalar) must be the floor of Neff.
Typically this means you should provide three neutrino masses unless you are considering something like a sterile neutrino. Ob0 : float or None, optional Omega baryons: density of baryonic matter in units of the critical density at z=0. If this is set to None (the default), any computation that requires its value will raise an exception. name : str or None (optional, keyword-only) Name for this cosmological object. meta : mapping or None (optional, keyword-only) Metadata for the cosmology, e.g., a reference. Examples -------- >>> from astropy.cosmology import Flatw0waCDM >>> cosmo = Flatw0waCDM(H0=70, Om0=0.3, w0=-0.9, wa=0.2) The comoving distance in Mpc at redshift z: >>> z = 0.5 >>> dc = cosmo.comoving_distance(z) References ---------- .. [1] Chevallier, M., & Polarski, D. (2001). Accelerating Universes with Scaling Dark Matter. International Journal of Modern Physics D, 10(2), 213-223. .. [2] Linder, E. (2003). Exploring the Expansion History of the Universe. Phys. Rev. Lett., 90, 091301. """ def __init__(self, H0, Om0, w0=-1.0, wa=0.0, Tcmb0=0.0*u.K, Neff=3.04, m_nu=0.0*u.eV, Ob0=None, *, name=None, meta=None): super().__init__(H0=H0, Om0=Om0, Ode0=0.0, w0=w0, wa=wa, Tcmb0=Tcmb0, Neff=Neff, m_nu=m_nu, Ob0=Ob0, name=name, meta=meta) # Please see :ref:`astropy-cosmology-fast-integrals` for discussion # about what is being done here. if self._Tcmb0.value == 0: self._inv_efunc_scalar = scalar_inv_efuncs.fw0wacdm_inv_efunc_norel self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._w0, self._wa) elif not self._massivenu: self._inv_efunc_scalar = scalar_inv_efuncs.fw0wacdm_inv_efunc_nomnu self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ogamma0 + self._Onu0, self._w0, self._wa) else: self._inv_efunc_scalar = scalar_inv_efuncs.fw0wacdm_inv_efunc self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ogamma0, self._neff_per_nu, self._nmasslessnu, self._nu_y_list, self._w0, self._wa)
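A small check of the CPL limits documented above (not part of the module), with the doctests' illustrative parameters: ``w(0)`` equals ``w0`` and ``w(z)`` tends to ``w0 + wa`` as ``z`` grows (``a -> 0``).

import numpy as np
from astropy.cosmology import Flatw0waCDM, w0waCDM

for cosmo in (w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.2),
              Flatw0waCDM(H0=70, Om0=0.3, w0=-0.9, wa=0.2)):
    assert np.isclose(cosmo.w(0), -0.9)               # w(0) == w0
    assert np.isclose(cosmo.w(1e8), -0.7, atol=1e-6)  # w -> w0 + wa as a -> 0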
# Licensed under a 3-clause BSD style license - see LICENSE.rst # STDLIB import inspect import random # THIRD PARTY import numpy as np import pytest # LOCAL from astropy.cosmology.core import Cosmology from astropy.cosmology.io.model import _CosmologyModel, from_model, to_model from astropy.cosmology.tests.helper import get_redshift_methods from astropy.modeling.models import Gaussian1D from astropy.utils.compat.optional_deps import HAS_SCIPY from .base import ToFromDirectTestBase, ToFromTestMixinBase ############################################################################### class ToFromModelTestMixin(ToFromTestMixinBase): """Tests for a Cosmology[To/From]Format with ``format="astropy.model"``. This class will not be directly called by :mod:`pytest` since its name does not begin with ``Test``. To activate the contained tests this class must be inherited in a subclass. Subclasses must define a :func:`pytest.fixture` ``cosmo`` that returns/yields an instance of a |Cosmology|. See ``TestCosmologyToFromFormat`` or ``TestCosmology`` for examples. """ @pytest.fixture(scope="class") def method_name(self, cosmo): # get methods, ignoring private and dunder methods = get_redshift_methods(cosmo, include_private=False, include_z2=True) # dynamically detect ABC and optional dependencies for n in tuple(methods): params = inspect.signature(getattr(cosmo, n)).parameters.keys() ERROR_SEIVE = (NotImplementedError, ValueError) # # ABC can't introspect for good input if not HAS_SCIPY: ERROR_SEIVE = ERROR_SEIVE + (ModuleNotFoundError, ) args = np.arange(len(params)) + 1 try: getattr(cosmo, n)(*args) except ERROR_SEIVE: methods.discard(n) # TODO! pytest doesn't currently allow multiple yields (`cosmo`) so # testing with 1 random method # yield from methods return random.choice(tuple(methods)) if methods else None # =============================================================== def test_fromformat_model_wrong_cls(self, from_format): """Test when Model is not the correct class.""" model = Gaussian1D(amplitude=10, mean=14) with pytest.raises(AttributeError): from_format(model) def test_toformat_model_not_method(self, to_format): """Test when method is not a method.""" with pytest.raises(AttributeError): to_format("astropy.model", method="this is definitely not a method.") def test_toformat_model_not_callable(self, to_format): """Test when method is actually an attribute.""" with pytest.raises(ValueError): to_format("astropy.model", method="name") def test_toformat_model(self, cosmo, to_format, method_name): """Test cosmology -> astropy.model.""" if method_name is None: # no test if no method return model = to_format("astropy.model", method=method_name) assert isinstance(model, _CosmologyModel) # Parameters expect = tuple([n for n in cosmo.__parameters__ if getattr(cosmo, n) is not None]) assert model.param_names == expect # scalar result args = np.arange(model.n_inputs) + 1 got = model.evaluate(*args) expected = getattr(cosmo, method_name)(*args) assert np.all(got == expected) got = model(*args) expected = getattr(cosmo, method_name)(*args) assert np.all(got == expected) # vector result if "scalar" not in method_name: args = (np.ones((model.n_inputs, 3)).T + np.arange(model.n_inputs)).T got = model.evaluate(*args) expected = getattr(cosmo, method_name)(*args) assert np.all(got == expected) got = model(*args) expected = getattr(cosmo, method_name)(*args) assert np.all(got == expected) def test_tofromformat_model_instance(self, cosmo_cls, cosmo, method_name, to_format, from_format): """Test cosmology -> 
astropy.model -> cosmology.""" if method_name is None: # no test if no method return # ------------ # To Model # this also serves as a test of all added methods / attributes # in _CosmologyModel. model = to_format("astropy.model", method=method_name) assert isinstance(model, _CosmologyModel) assert model.cosmology_class is cosmo_cls assert model.cosmology == cosmo assert model.method_name == method_name # ------------ # From Model # it won't error if everything matches up got = from_format(model, format="astropy.model") assert got == cosmo assert set(cosmo.meta.keys()).issubset(got.meta.keys()) # Note: model adds parameter attributes to the metadata # also it auto-identifies 'format' got = from_format(model) assert got == cosmo assert set(cosmo.meta.keys()).issubset(got.meta.keys()) def test_fromformat_model_subclass_partial_info(self): """ Test writing from an instance and reading from that class. This works with missing information. """ pass # there's no partial information with a Model @pytest.mark.parametrize("format", [True, False, None, "astropy.model"]) def test_is_equivalent_to_model(self, cosmo, method_name, to_format, format): """Test :meth:`astropy.cosmology.Cosmology.is_equivalent`. This test checks that Cosmology equivalency can be extended to any Python object that can be converted to a Cosmology -- in this case a model. """ if method_name is None: # no test if no method return obj = to_format("astropy.model", method=method_name) assert not isinstance(obj, Cosmology) is_equiv = cosmo.is_equivalent(obj, format=format) assert is_equiv is (True if format is not False else False) class TestToFromModel(ToFromDirectTestBase, ToFromModelTestMixin): """Directly test ``to/from_model``.""" def setup_class(self): self.functions = {"to": to_model, "from": from_model}
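A round-trip sketch of what the mixin above exercises (not part of the test module), using the built-in ``Planck18`` and an illustrative choice of redshift method (``lookback_time``, which needs scipy):

from astropy.cosmology import Cosmology, Planck18

# Wrap a single redshift method of the cosmology as an astropy Model,
# then recover an equivalent cosmology from the model's parameters/metadata.
model = Planck18.to_format("astropy.model", method="lookback_time")
cosmo = Cosmology.from_format(model, format="astropy.model")
assert cosmo == Planck18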
# Licensed under a 3-clause BSD style license - see LICENSE.rst # STDLIB import inspect from collections import OrderedDict # THIRD PARTY import numpy as np import pytest # LOCAL from astropy.cosmology import Cosmology from astropy.cosmology.io.mapping import from_mapping, to_mapping from .base import ToFromDirectTestBase, ToFromTestMixinBase ############################################################################### class ToFromMappingTestMixin(ToFromTestMixinBase): """Tests for a Cosmology[To/From]Format with ``format="mapping"``. This class will not be directly called by :mod:`pytest` since its name does not begin with ``Test``. To activate the contained tests this class must be inherited in a subclass. Subclasses must define a :func:`pytest.fixture` ``cosmo`` that returns/yields an instance of a |Cosmology|. See ``TestCosmology`` for an example. """ def test_to_mapping_default(self, cosmo, to_format): """Test default usage of Cosmology -> mapping.""" m = to_format('mapping') keys = tuple(m.keys()) assert isinstance(m, dict) # Check equality of all expected items assert keys[0] == "cosmology" assert m.pop("cosmology") is cosmo.__class__ assert keys[1] == "name" assert m.pop("name") == cosmo.name for i, k in enumerate(cosmo.__parameters__, start=2): assert keys[i] == k assert np.array_equal(m.pop(k), getattr(cosmo, k)) assert keys[-1] == "meta" assert m.pop("meta") == cosmo.meta # No unexpected items assert not m def test_to_mapping_wrong_cls(self, to_format): """Test incorrect argument ``cls`` in ``to_mapping()``.""" with pytest.raises(TypeError, match="'cls' must be"): to_format('mapping', cls=list) @pytest.mark.parametrize("map_cls", [dict, OrderedDict]) def test_to_mapping_cls(self, to_format, map_cls): """Test argument ``cls`` in ``to_mapping()``.""" m = to_format('mapping', cls=map_cls) assert isinstance(m, map_cls) # test type def test_to_mapping_cosmology_as_str(self, cosmo_cls, to_format): """Test argument ``cosmology_as_str`` in ``to_mapping()``.""" default = to_format('mapping') # Cosmology is the class m = to_format('mapping', cosmology_as_str=False) assert inspect.isclass(m["cosmology"]) assert cosmo_cls is m["cosmology"] assert m == default # False is the default option # Cosmology is a string m = to_format('mapping', cosmology_as_str=True) assert isinstance(m["cosmology"], str) assert m["cosmology"] == cosmo_cls.__qualname__ # Correct class assert tuple(m.keys())[0] == "cosmology" # Stayed at same index def test_tofrom_mapping_cosmology_as_str(self, cosmo, to_format, from_format): """Test roundtrip with ``cosmology_as_str=True``. The test for the default option (`False`) is in ``test_tofrom_mapping_instance``. """ m = to_format('mapping', cosmology_as_str=True) got = from_format(m, format="mapping") assert got == cosmo assert got.meta == cosmo.meta def test_to_mapping_move_from_meta(self, to_format): """Test argument ``move_from_meta`` in ``to_mapping()``.""" default = to_format('mapping') # Metadata is 'separate' from main mapping m = to_format('mapping', move_from_meta=False) assert "meta" in m.keys() assert not any([k in m for k in m["meta"]]) # Not added to main assert m == default # False is the default option # Metadata is mixed into main mapping. 
m = to_format('mapping', move_from_meta=True) assert "meta" not in m.keys() assert all([k in m for k in default["meta"]]) # All added to main # The parameters take precedence over the metadata assert all([np.array_equal(v, m[k]) for k, v in default.items() if k != "meta"]) def test_tofrom_mapping_move_tofrom_meta(self, cosmo, to_format, from_format): """Test roundtrip of ``move_from/to_meta`` in ``to/from_mapping()``.""" # Metadata is mixed into main mapping. m = to_format('mapping', move_from_meta=True) # (Just adding something to ensure there's 'metadata') m["mismatching"] = "will error" # (Tests are different if the last argument is a **kwarg) if tuple(cosmo._init_signature.parameters.values())[-1].kind == 4: got = from_format(m, format="mapping") assert got.name == cosmo.name assert "mismatching" not in got.meta return # don't continue testing # Reading with mismatching parameters errors... with pytest.raises(TypeError, match="there are unused parameters"): from_format(m, format="mapping") # unless mismatched are moved to meta. got = from_format(m, format="mapping", move_to_meta=True) assert got == cosmo # (Doesn't check metadata) assert got.meta["mismatching"] == "will error" # ----------------------------------------------------- def test_from_not_mapping(self, cosmo, from_format): """Test incorrect map type in ``from_mapping()``.""" with pytest.raises((TypeError, ValueError)): from_format("NOT A MAP", format="mapping") def test_from_mapping_default(self, cosmo, to_format, from_format): """Test (cosmology -> Mapping) -> cosmology.""" m = to_format('mapping') # Read from exactly as given. got = from_format(m, format="mapping") assert got == cosmo assert got.meta == cosmo.meta # Reading auto-identifies 'format' got = from_format(m) assert got == cosmo assert got.meta == cosmo.meta def test_fromformat_subclass_partial_info_mapping(self, cosmo): """ Test writing from an instance and reading from that class. This works with missing information. """ m = cosmo.to_format("mapping") # partial information m.pop("cosmology", None) m.pop("Tcmb0", None) # read with the same class that wrote fills in the missing info with # the default value got = cosmo.__class__.from_format(m, format="mapping") got2 = Cosmology.from_format(m, format="mapping", cosmology=cosmo.__class__) got3 = Cosmology.from_format(m, format="mapping", cosmology=cosmo.__class__.__qualname__) assert (got == got2) and (got2 == got3) # internal consistency # not equal, because Tcmb0 is changed, which also changes m_nu assert got != cosmo assert got.Tcmb0 == cosmo.__class__._init_signature.parameters["Tcmb0"].default assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0, m_nu=cosmo.m_nu) == cosmo # but the metadata is the same assert got.meta == cosmo.meta @pytest.mark.parametrize("format", [True, False, None, "mapping"]) def test_is_equivalent_to_mapping(self, cosmo, to_format, format): """Test :meth:`astropy.cosmology.Cosmology.is_equivalent`. This test checks that Cosmology equivalency can be extended to any Python object that can be converted to a Cosmology -- in this case a mapping. 
""" obj = to_format("mapping") assert not isinstance(obj, Cosmology) is_equiv = cosmo.is_equivalent(obj, format=format) assert is_equiv is (True if format is not False else False) class TestToFromMapping(ToFromDirectTestBase, ToFromMappingTestMixin): """Directly test ``to/from_mapping``.""" def setup_class(self): self.functions = {"to": to_mapping, "from": from_mapping} @pytest.mark.skip("N/A") def test_fromformat_subclass_partial_info_mapping(self): """This test does not apply to the direct functions."""
# Licensed under a 3-clause BSD style license - see LICENSE.rst # THIRD PARTY import pytest # LOCAL from astropy.cosmology import Cosmology from astropy.cosmology.core import _COSMOLOGY_CLASSES from astropy.cosmology.io.table import from_table, to_table from astropy.table import QTable, Table, vstack from .base import ToFromDirectTestBase, ToFromTestMixinBase ############################################################################### class ToFromTableTestMixin(ToFromTestMixinBase): """ Tests for a Cosmology[To/From]Format with ``format="astropy.table"``. This class will not be directly called by :mod:`pytest` since its name does not begin with ``Test``. To activate the contained tests this class must be inherited in a subclass. Subclasses must define a :func:`pytest.fixture` ``cosmo`` that returns/yields an instance of a |Cosmology|. See ``TestCosmology`` for an example. """ def test_to_table_bad_index(self, from_format, to_format): """Test if argument ``index`` is incorrect""" tbl = to_format("astropy.table") # single-row table and has a non-0/None index with pytest.raises(IndexError, match="index 2 out of range"): from_format(tbl, index=2, format="astropy.table") # string index where doesn't match with pytest.raises(KeyError, match="No matches found for key"): from_format(tbl, index="row 0", format="astropy.table") # ----------------------- def test_to_table_failed_cls(self, to_format): """Test failed table type.""" with pytest.raises(TypeError, match="'cls' must be"): to_format('astropy.table', cls=list) @pytest.mark.parametrize("tbl_cls", [QTable, Table]) def test_to_table_cls(self, to_format, tbl_cls): tbl = to_format('astropy.table', cls=tbl_cls) assert isinstance(tbl, tbl_cls) # test type # ----------------------- @pytest.mark.parametrize("in_meta", [True, False]) def test_to_table_in_meta(self, cosmo_cls, to_format, in_meta): """Test where the cosmology class is placed.""" tbl = to_format('astropy.table', cosmology_in_meta=in_meta) # if it's in metadata, it's not a column. And vice versa. if in_meta: assert tbl.meta["cosmology"] == cosmo_cls.__qualname__ assert "cosmology" not in tbl.colnames # not also a column else: assert tbl["cosmology"][0] == cosmo_cls.__qualname__ assert "cosmology" not in tbl.meta # ----------------------- def test_to_table(self, cosmo_cls, cosmo, to_format): """Test cosmology -> astropy.table.""" tbl = to_format("astropy.table") # Test properties of Table. assert isinstance(tbl, QTable) assert tbl.meta["cosmology"] == cosmo_cls.__qualname__ assert tbl["name"] == cosmo.name assert tbl.indices # indexed # Test each Parameter column has expected information. 
for n in cosmo.__parameters__: P = getattr(cosmo_cls, n) # Parameter col = tbl[n] # Column # Compare the two assert col.info.name == P.name assert col.info.description == P.__doc__ assert col.info.format == (None if col[0] is None else P.format_spec) assert col.info.meta == (cosmo.meta.get(n) or {}) # ----------------------- def test_from_not_table(self, cosmo, from_format): """Test not passing a Table to the Table parser.""" with pytest.raises((TypeError, ValueError)): from_format("NOT A TABLE", format="astropy.table") def test_tofrom_table_instance(self, cosmo_cls, cosmo, from_format, to_format): """Test cosmology -> astropy.table -> cosmology.""" tbl = to_format("astropy.table") # add information tbl["mismatching"] = "will error" # tests are different if the last argument is a **kwarg if tuple(cosmo._init_signature.parameters.values())[-1].kind == 4: got = from_format(tbl, format="astropy.table") assert got.__class__ is cosmo_cls assert got.name == cosmo.name assert "mismatching" not in got.meta return # don't continue testing # read with mismatching parameters errors with pytest.raises(TypeError, match="there are unused parameters"): from_format(tbl, format="astropy.table") # unless mismatched are moved to meta got = from_format(tbl, format="astropy.table", move_to_meta=True) assert got == cosmo assert got.meta["mismatching"] == "will error" # it won't error if everything matches up tbl.remove_column("mismatching") got = from_format(tbl, format="astropy.table") assert got == cosmo # and it will also work if the cosmology is a class # Note this is not the default output of ``to_format``. tbl.meta["cosmology"] = _COSMOLOGY_CLASSES[tbl.meta["cosmology"]] got = from_format(tbl, format="astropy.table") assert got == cosmo # also it auto-identifies 'format' got = from_format(tbl) assert got == cosmo def test_fromformat_table_subclass_partial_info(self, cosmo_cls, cosmo, from_format, to_format): """ Test writing from an instance and reading from that class. This works with missing information. """ # test to_format tbl = to_format("astropy.table") assert isinstance(tbl, QTable) # partial information tbl.meta.pop("cosmology", None) del tbl["Tcmb0"] # read with the same class that wrote fills in the missing info with # the default value got = cosmo_cls.from_format(tbl, format="astropy.table") got2 = from_format(tbl, format="astropy.table", cosmology=cosmo_cls) got3 = from_format(tbl, format="astropy.table", cosmology=cosmo_cls.__qualname__) assert (got == got2) and (got2 == got3) # internal consistency # not equal, because Tcmb0 is changed, which also changes m_nu assert got != cosmo assert got.Tcmb0 == cosmo_cls._init_signature.parameters["Tcmb0"].default assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0, m_nu=cosmo.m_nu) == cosmo # but the metadata is the same assert got.meta == cosmo.meta @pytest.mark.parametrize("add_index", [True, False]) def test_tofrom_table_mutlirow(self, cosmo_cls, cosmo, from_format, add_index): """Test if table has multiple rows.""" # ------------ # To Table cosmo1 = cosmo.clone(name="row 0") cosmo2 = cosmo.clone(name="row 2") tbl = vstack([c.to_format("astropy.table") for c in (cosmo1, cosmo, cosmo2)], metadata_conflicts='silent') assert isinstance(tbl, QTable) assert tbl.meta["cosmology"] == cosmo_cls.__qualname__ assert tbl[1]["name"] == cosmo.name # whether to add an index. `from_format` can work with or without. 
if add_index: tbl.add_index("name", unique=True) # ------------ # From Table # it will error on a multi-row table with pytest.raises(ValueError, match="need to select a specific row"): from_format(tbl, format="astropy.table") # unless the index argument is provided got = from_format(tbl, index=1, format="astropy.table") assert got == cosmo # the index can be a string got = from_format(tbl, index=cosmo.name, format="astropy.table") assert got == cosmo # when there's more than one cosmology found tbls = vstack([tbl, tbl], metadata_conflicts="silent") with pytest.raises(ValueError, match="more than one"): from_format(tbls, index=cosmo.name, format="astropy.table") @pytest.mark.parametrize("format", [True, False, None, "astropy.table"]) def test_is_equivalent_to_table(self, cosmo, to_format, format): """Test :meth:`astropy.cosmology.Cosmology.is_equivalent`. This test checks that Cosmology equivalency can be extended to any Python object that can be converted to a Cosmology -- in this case a |Table|. """ obj = to_format("astropy.table") assert not isinstance(obj, Cosmology) is_equiv = cosmo.is_equivalent(obj, format=format) assert is_equiv is (True if format is not False else False) class TestToFromTable(ToFromDirectTestBase, ToFromTableTestMixin): """Directly test ``to/from_table``.""" def setup_class(self): self.functions = {"to": to_table, "from": from_table}
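A round-trip sketch of the table format exercised above (not part of the test module), using the built-in ``Planck18``; ``to_format`` produces a one-row ``QTable`` with the cosmology class recorded in the table metadata:

from astropy.cosmology import Cosmology, Planck18
from astropy.table import QTable

tbl = Planck18.to_format("astropy.table")
assert isinstance(tbl, QTable)
assert tbl.meta["cosmology"] == "FlatLambdaCDM"
cosmo = Cosmology.from_format(tbl, format="astropy.table")
assert cosmo == Planck18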
# Licensed under a 3-clause BSD style license - see LICENSE.rst # STDLIB # THIRD PARTY import pytest # LOCAL import astropy.units as u from astropy.cosmology import Cosmology, FlatLambdaCDM, Planck18 from astropy.cosmology import units as cu from astropy.cosmology.io.yaml import from_yaml, to_yaml, yaml_constructor, yaml_representer from astropy.io.misc.yaml import AstropyDumper, dump, load from .base import ToFromDirectTestBase, ToFromTestMixinBase ############################################################################## # Test Serializer def test_yaml_representer(): """Test :func:`~astropy.cosmology.io.yaml.yaml_representer`.""" # test function `representer` representer = yaml_representer("!astropy.cosmology.flrw.LambdaCDM") assert callable(representer) # test the normal method of dumping to YAML yml = dump(Planck18) assert isinstance(yml, str) assert yml.startswith("!astropy.cosmology.flrw.FlatLambdaCDM") def test_yaml_constructor(): """Test :func:`~astropy.cosmology.io.yaml.yaml_constructor`.""" # test function `constructor` constructor = yaml_constructor(FlatLambdaCDM) assert callable(constructor) # it's too hard to manually construct a node, so we only test dump/load # this is also a good round-trip test yml = dump(Planck18) with u.add_enabled_units(cu): # needed for redshift units cosmo = load(yml) assert isinstance(cosmo, FlatLambdaCDM) assert cosmo == Planck18 assert cosmo.meta == Planck18.meta ############################################################################## # Test Unified I/O class ToFromYAMLTestMixin(ToFromTestMixinBase): """ Tests for a Cosmology[To/From]Format with ``format="yaml"``. This class will not be directly called by :mod:`pytest` since its name does not begin with ``Test``. To activate the contained tests this class must be inherited in a subclass. Subclasses must define a :func:`pytest.fixture` ``cosmo`` that returns/yields an instance of a |Cosmology|. See ``TestCosmologyToFromFormat`` or ``TestCosmology`` for examples. """ @pytest.fixture def xfail_if_not_registered_with_yaml(self, cosmo_cls): """ YAML I/O only works on registered classes. So the thing to check is if this class is registered. If not, :func:`pytest.xfail` this test. Some of the tests define custom cosmologies. They are not registered. """ if cosmo_cls not in AstropyDumper.yaml_representers: pytest.xfail(f"Cosmologies of type {cosmo_cls} are not registered with YAML.") # =============================================================== def test_to_yaml(self, cosmo, to_format, xfail_if_not_registered_with_yaml): """Test cosmology -> YAML.""" yml = to_format('yaml') assert isinstance(yml, str) # test type assert yml.startswith("!astropy.cosmology.") def test_from_yaml_default(self, cosmo, to_format, from_format, xfail_if_not_registered_with_yaml): """Test cosmology -> YAML -> cosmology.""" yml = to_format('yaml') got = from_format(yml, format="yaml") # (cannot autoidentify) assert got.name == cosmo.name assert got.meta == cosmo.meta # it won't error if everything matches up got = from_format(yml, format="yaml") assert got == cosmo assert got.meta == cosmo.meta # auto-identify test moved because it doesn't work. # see test_from_yaml_autoidentify def test_from_yaml_autoidentify(self, cosmo, to_format, from_format, xfail_if_not_registered_with_yaml): """As a non-path string, it does NOT auto-identify 'format'. TODO! this says there should be different types of I/O registries. not just hacking object conversion on top of file I/O.
""" assert self.can_autodentify("yaml") is False # Showing the specific error. The str is interpreted as a file location # but is too long a file name. yml = to_format('yaml') with pytest.raises((FileNotFoundError, OSError)): # OSError in Windows from_format(yml) # # TODO! this is a challenging test to write. It's also unlikely to happen. # def test_fromformat_subclass_partial_info_yaml(self, cosmo): # """ # Test writing from an instance and reading from that class. # This works with missing information. # """ # ----------------------------------------------------- @pytest.mark.parametrize("format", [True, False, None]) def test_is_equivalent_to_yaml(self, cosmo, to_format, format, xfail_if_not_registered_with_yaml): """Test :meth:`astropy.cosmology.Cosmology.is_equivalent`. This test checks that Cosmology equivalency can be extended to any Python object that can be converted to a Cosmology -- in this case a YAML string. YAML can't be identified without "format" specified. """ obj = to_format("yaml") assert not isinstance(obj, Cosmology) is_equiv = cosmo.is_equivalent(obj, format=format) assert is_equiv is False def test_is_equivalent_to_yaml_specify_format(self, cosmo, to_format, xfail_if_not_registered_with_yaml): """Test :meth:`astropy.cosmology.Cosmology.is_equivalent`. Same as ``test_is_equivalent_to_yaml`` but with ``format="yaml"``. """ assert cosmo.is_equivalent(to_format("yaml"), format="yaml") is True class TestToFromYAML(ToFromDirectTestBase, ToFromYAMLTestMixin): """ Directly test ``to/from_yaml``. These are not public API and are discouraged from use, in favor of ``Cosmology.to/from_format(..., format="yaml")``, but should be tested regardless b/c 3rd party packages might use these in their Cosmology I/O. Also, it's cheap to test. """ def setup_class(self): """Set up fixtures to use ``to/from_yaml``, not the I/O abstractions.""" self.functions = {"to": to_yaml, "from": from_yaml} @pytest.fixture(scope="class", autouse=True) def setup(self): """ Setup and teardown for tests. This overrides from super because `ToFromDirectTestBase` adds a custom Cosmology ``CosmologyWithKwargs`` that is not registered with YAML. """ yield # run tests def test_from_yaml_autoidentify(self, cosmo, to_format, from_format): """ If directly calling the function there's no auto-identification. So this overrides the test from `ToFromYAMLTestMixin` """
# Licensed under a 3-clause BSD style license - see LICENSE.rst

# THIRD PARTY
import pytest

# LOCAL
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.cosmology.io.ecsv import read_ecsv, write_ecsv
from astropy.table import QTable, Table, vstack

from .base import ReadWriteDirectTestBase, ReadWriteTestMixinBase

###############################################################################


class ReadWriteECSVTestMixin(ReadWriteTestMixinBase):
    """
    Tests for a Cosmology[Read/Write] with ``format="ascii.ecsv"``.
    This class will not be directly called by :mod:`pytest` since its name does
    not begin with ``Test``. To activate the contained tests this class must
    be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
    ``cosmo`` that returns/yields an instance of a |Cosmology|.
    See ``TestCosmology`` for an example.
    """

    def test_to_ecsv_bad_index(self, read, write, tmp_path):
        """Test if argument ``index`` is incorrect."""
        fp = tmp_path / "test_to_ecsv_bad_index.ecsv"
        write(fp, format="ascii.ecsv")

        # single-row table and has a non-0/None index
        with pytest.raises(IndexError, match="index 2 out of range"):
            read(fp, index=2, format="ascii.ecsv")

        # string index where doesn't match
        with pytest.raises(KeyError, match="No matches found for key"):
            read(fp, index="row 0", format="ascii.ecsv")

    # -----------------------

    def test_to_ecsv_failed_cls(self, write, tmp_path):
        """Test failed table type."""
        fp = tmp_path / "test_to_ecsv_failed_cls.ecsv"

        with pytest.raises(TypeError, match="'cls' must be"):
            write(fp, format='ascii.ecsv', cls=list)

    @pytest.mark.parametrize("tbl_cls", [QTable, Table])
    def test_to_ecsv_cls(self, write, tbl_cls, tmp_path):
        fp = tmp_path / "test_to_ecsv_cls.ecsv"
        write(fp, format='ascii.ecsv', cls=tbl_cls)

    # -----------------------

    @pytest.mark.parametrize("in_meta", [True, False])
    def test_to_ecsv_in_meta(self, cosmo_cls, write, in_meta, tmp_path, add_cu):
        """Test where the cosmology class is placed."""
        fp = tmp_path / "test_to_ecsv_in_meta.ecsv"
        write(fp, format='ascii.ecsv', cosmology_in_meta=in_meta)

        # if it's in metadata, it's not a column. And vice versa.
        tbl = QTable.read(fp)
        if in_meta:
            assert tbl.meta["cosmology"] == cosmo_cls.__qualname__
            assert "cosmology" not in tbl.colnames  # not also a column
        else:
            assert tbl["cosmology"][0] == cosmo_cls.__qualname__
            assert "cosmology" not in tbl.meta

    # -----------------------

    def test_readwrite_ecsv_instance(self, cosmo_cls, cosmo, read, write,
                                     tmp_path, add_cu):
        """Test cosmology -> ascii.ecsv -> cosmology."""
        fp = tmp_path / "test_readwrite_ecsv_instance.ecsv"

        # ------------
        # To Table

        write(fp, format="ascii.ecsv")

        # some checks on the saved file
        tbl = QTable.read(fp)
        assert tbl.meta["cosmology"] == cosmo_cls.__qualname__
        assert tbl["name"] == cosmo.name

        # ------------
        # From Table

        tbl["mismatching"] = "will error"
        tbl.write(fp, format="ascii.ecsv", overwrite=True)

        # tests are different if the last argument is a **kwarg
        if tuple(cosmo._init_signature.parameters.values())[-1].kind == 4:
            got = read(fp, format="ascii.ecsv")

            assert got.__class__ is cosmo_cls
            assert got.name == cosmo.name
            assert "mismatching" not in got.meta

            return  # don't continue testing

        # read with mismatching parameters errors
        with pytest.raises(TypeError, match="there are unused parameters"):
            read(fp, format="ascii.ecsv")

        # unless mismatched are moved to meta
        got = read(fp, format="ascii.ecsv", move_to_meta=True)
        assert got == cosmo
        assert got.meta["mismatching"] == "will error"

        # it won't error if everything matches up
        tbl.remove_column("mismatching")
        tbl.write(fp, format="ascii.ecsv", overwrite=True)
        got = read(fp, format="ascii.ecsv")
        assert got == cosmo

        # and it will also work if the cosmology is a class
        # Note this is not the default output of ``write``.
        tbl.meta["cosmology"] = _COSMOLOGY_CLASSES[tbl.meta["cosmology"]]
        got = read(fp, format="ascii.ecsv")
        assert got == cosmo

        # also it auto-identifies 'format'
        got = read(fp)
        assert got == cosmo

    def test_readwrite_ecsv_subclass_partial_info(self, cosmo_cls, cosmo, read,
                                                  write, tmp_path, add_cu):
        """
        Test writing from an instance and reading from that class.
        This works with missing information.
""" fp = tmp_path / "test_read_ecsv_subclass_partial_info.ecsv" # test write write(fp, format="ascii.ecsv") # partial information tbl = QTable.read(fp) tbl.meta.pop("cosmology", None) del tbl["Tcmb0"] tbl.write(fp, overwrite=True) # read with the same class that wrote fills in the missing info with # the default value got = cosmo_cls.read(fp, format="ascii.ecsv") got2 = read(fp, format="ascii.ecsv", cosmology=cosmo_cls) got3 = read(fp, format="ascii.ecsv", cosmology=cosmo_cls.__qualname__) assert (got == got2) and (got2 == got3) # internal consistency # not equal, because Tcmb0 is changed, which also changes m_nu assert got != cosmo assert got.Tcmb0 == cosmo_cls._init_signature.parameters["Tcmb0"].default assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0, m_nu=cosmo.m_nu) == cosmo # but the metadata is the same assert got.meta == cosmo.meta def test_readwrite_ecsv_mutlirow(self, cosmo, read, write, tmp_path, add_cu): """Test if table has multiple rows.""" fp = tmp_path / "test_readwrite_ecsv_mutlirow.ecsv" # Make cosmo1 = cosmo.clone(name="row 0") cosmo2 = cosmo.clone(name="row 2") tbl = vstack([c.to_format("astropy.table") for c in (cosmo1, cosmo, cosmo2)], metadata_conflicts='silent') tbl.write(fp, format="ascii.ecsv") # ------------ # From Table # it will error on a multi-row table with pytest.raises(ValueError, match="need to select a specific row"): read(fp, format="ascii.ecsv") # unless the index argument is provided got = read(fp, index=1, format="ascii.ecsv") assert got == cosmo # the index can be a string got = read(fp, index=cosmo.name, format="ascii.ecsv") assert got == cosmo # it's better if the table already has an index # this will be identical to the previous ``got`` tbl.add_index("name") got2 = read(fp, index=cosmo.name, format="ascii.ecsv") assert got2 == cosmo class TestReadWriteECSV(ReadWriteDirectTestBase, ReadWriteECSVTestMixin): """ Directly test ``read/write_ecsv``. These are not public API and are discouraged from use, in favor of ``Cosmology.read/write(..., format="ascii.ecsv")``, but should be tested regardless b/c they are used internally. """ def setup_class(self): self.functions = {"read": read_ecsv, "write": write_ecsv}
# Licensed under a 3-clause BSD style license - see LICENSE.rst

# STDLIB
import json
import os

# THIRD PARTY
import pytest

# LOCAL
import astropy.units as u
from astropy.cosmology import units as cu
from astropy.cosmology.connect import readwrite_registry
from astropy.cosmology.core import Cosmology

from .base import ReadWriteDirectTestBase, ReadWriteTestMixinBase

###############################################################################


def read_json(filename, **kwargs):
    """Read JSON.

    Parameters
    ----------
    filename : str
    **kwargs
        Keyword arguments into :meth:`~astropy.cosmology.Cosmology.from_format`

    Returns
    -------
    `~astropy.cosmology.Cosmology` instance
    """
    # read
    if isinstance(filename, (str, bytes, os.PathLike)):
        with open(filename, "r") as file:
            data = file.read()
    else:  # file-like : this also handles errors in dumping
        data = filename.read()

    mapping = json.loads(data)  # parse json mappable to dict

    # deserialize Quantity
    with u.add_enabled_units(cu.redshift):
        for k, v in mapping.items():
            if isinstance(v, dict) and "value" in v and "unit" in v:
                mapping[k] = u.Quantity(v["value"], v["unit"])
        for k, v in mapping.get("meta", {}).items():  # also the metadata
            if isinstance(v, dict) and "value" in v and "unit" in v:
                mapping["meta"][k] = u.Quantity(v["value"], v["unit"])

    return Cosmology.from_format(mapping, format="mapping", **kwargs)


def write_json(cosmology, file, *, overwrite=False):
    """Write Cosmology to JSON.

    Parameters
    ----------
    cosmology : `astropy.cosmology.Cosmology` subclass instance
    file : path-like or file-like
    overwrite : bool (optional, keyword-only)
    """
    data = cosmology.to_format("mapping")  # start by turning into dict
    data["cosmology"] = data["cosmology"].__qualname__

    # serialize Quantity
    for k, v in data.items():
        if isinstance(v, u.Quantity):
            data[k] = {"value": v.value.tolist(), "unit": str(v.unit)}
    for k, v in data.get("meta", {}).items():  # also serialize the metadata
        if isinstance(v, u.Quantity):
            data["meta"][k] = {"value": v.value.tolist(), "unit": str(v.unit)}

    # check that file exists and whether to overwrite.
    if os.path.exists(file) and not overwrite:
        raise IOError(f"{file} exists. Set 'overwrite' to write over.")
    with open(file, "w") as write_file:
        json.dump(data, write_file)


def json_identify(origin, filepath, fileobj, *args, **kwargs):
    return filepath is not None and filepath.endswith(".json")


###############################################################################


class ReadWriteJSONTestMixin(ReadWriteTestMixinBase):
    """
    Tests for a Cosmology[Read/Write] with ``format="json"``.
    This class will not be directly called by :mod:`pytest` since its name does
    not begin with ``Test``. To activate the contained tests this class must
    be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
    ``cosmo`` that returns/yields an instance of a |Cosmology|.
    See ``TestCosmology`` for an example.
""" @pytest.fixture(scope="class", autouse=True) def register_and_unregister_json(self): """Setup & teardown for JSON read/write tests.""" # Register readwrite_registry.register_reader("json", Cosmology, read_json, force=True) readwrite_registry.register_writer("json", Cosmology, write_json, force=True) readwrite_registry.register_identifier("json", Cosmology, json_identify, force=True) yield # Run all tests in class # Unregister readwrite_registry.unregister_reader("json", Cosmology) readwrite_registry.unregister_writer("json", Cosmology) readwrite_registry.unregister_identifier("json", Cosmology) # ======================================================================== def test_readwrite_json_subclass_partial_info(self, cosmo_cls, cosmo, read, write, tmp_path, add_cu): """ Test writing from an instance and reading from that class. This works with missing information. """ fp = tmp_path / "test_readwrite_json_subclass_partial_info.json" # test write cosmo.write(fp, format="json") # partial information with open(fp, "r") as file: L = file.readlines()[0] L = L[: L.index('"cosmology":')] + L[L.index(", ") + 2 :] # remove cosmology i = L.index('"Tcmb0":') # delete Tcmb0 L = L[:i] + L[L.index(", ", L.index(", ", i) + 1) + 2 :] # second occurence tempfname = tmp_path / f"{cosmo.name}_temp.json" with open(tempfname, "w") as file: file.writelines([L]) # read with the same class that wrote fills in the missing info with # the default value got = cosmo_cls.read(tempfname, format="json") got2 = read(tempfname, format="json", cosmology=cosmo_cls) got3 = read(tempfname, format="json", cosmology=cosmo_cls.__qualname__) assert (got == got2) and (got2 == got3) # internal consistency # not equal, because Tcmb0 is changed, which also changes m_nu assert got != cosmo assert got.Tcmb0 == cosmo_cls._init_signature.parameters["Tcmb0"].default assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0, m_nu=cosmo.m_nu) == cosmo # but the metadata is the same assert got.meta == cosmo.meta class TestReadWriteJSON(ReadWriteDirectTestBase, ReadWriteJSONTestMixin): """ Directly test ``read/write_json``. These are not public API and are discouraged from use, in favor of ``Cosmology.read/write(..., format="json")``, but should be tested regardless b/c they are used internally. """ def setup_class(self): self.functions = {"read": read_json, "write": write_json}
# Licensed under a 3-clause BSD style license - see LICENSE.rst

# THIRD PARTY
import pytest

# LOCAL
from astropy.cosmology.io.cosmology import from_cosmology, to_cosmology

from .base import IODirectTestBase, ToFromTestMixinBase

###############################################################################


class ToFromCosmologyTestMixin(ToFromTestMixinBase):
    """
    Tests for a Cosmology[To/From]Format with ``format="astropy.cosmology"``.
    This class will not be directly called by :mod:`pytest` since its name does
    not begin with ``Test``. To activate the contained tests this class must
    be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
    ``cosmo`` that returns/yields an instance of a |Cosmology|.
    See ``TestCosmology`` for an example.
    """

    def test_to_cosmology_default(self, cosmo, to_format):
        """Test cosmology -> cosmology."""
        newcosmo = to_format("astropy.cosmology")
        assert newcosmo is cosmo

    def test_from_not_cosmology(self, cosmo, from_format):
        """Test incorrect type in ``Cosmology``."""
        with pytest.raises(TypeError):
            from_format("NOT A COSMOLOGY", format="astropy.cosmology")

    def test_from_cosmology_default(self, cosmo, from_format):
        """Test cosmology -> cosmology."""
        newcosmo = from_format(cosmo)
        assert newcosmo is cosmo

    @pytest.mark.parametrize("format", [True, False, None, "astropy.cosmology"])
    def test_is_equivalent_to_cosmology(self, cosmo, to_format, format):
        """Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.

        This test checks that Cosmology equivalency can be extended to any
        Python object that can be converted to a Cosmology -- in this case
        a Cosmology! Since it's the identity conversion, the cosmology is
        always equivalent to itself, regardless of ``format``.
        """
        obj = to_format("astropy.cosmology")
        assert obj is cosmo

        is_equiv = cosmo.is_equivalent(obj, format=format)
        assert is_equiv is True  # equivalent to self


class TestToFromCosmology(IODirectTestBase, ToFromCosmologyTestMixin):
    """Directly test ``to/from_cosmology``."""

    def setup_class(self):
        self.functions = {"to": to_cosmology, "from": from_cosmology}
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Testing :mod:`astropy.cosmology.flrw.wpwazpcdm`.""" ############################################################################## # IMPORTS # STDLIB # THIRD PARTY import pytest # LOCAL import astropy.cosmology.units as cu import astropy.units as u from astropy.cosmology import wpwaCDM from astropy.cosmology.parameter import Parameter from astropy.cosmology.tests.test_core import ParameterTestMixin from .test_base import FLRWSubclassTest from .test_w0wacdm import ParameterwaTestMixin ############################################################################## # TESTS ############################################################################## class ParameterwpTestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` wp on a Cosmology. wp is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. """ def test_wp(self, cosmo_cls, cosmo): """Test Parameter ``wp``.""" # on the class assert isinstance(cosmo_cls.wp, Parameter) assert "at the pivot" in cosmo_cls.wp.__doc__ assert cosmo_cls.wp.unit is None # on the instance assert cosmo.wp is cosmo._wp assert cosmo.wp == self.cls_kwargs["wp"] def test_init_wp(self, cosmo_cls, ba): """Test initialization for values of ``wp``.""" # test that it works with units ba.arguments["wp"] = ba.arguments["wp"] << u.one # ensure units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.wp == ba.arguments["wp"] # also without units ba.arguments["wp"] = ba.arguments["wp"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.wp == ba.arguments["wp"] # must be dimensionless ba.arguments["wp"] = 10 * u.km with pytest.raises(TypeError): cosmo_cls(*ba.args, **ba.kwargs) class ParameterzpTestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` zp on a Cosmology. zp is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. 
""" def test_zp(self, cosmo_cls, cosmo): """Test Parameter ``zp``.""" # on the class assert isinstance(cosmo_cls.zp, Parameter) assert "pivot redshift" in cosmo_cls.zp.__doc__ assert cosmo_cls.zp.unit == cu.redshift # on the instance assert cosmo.zp is cosmo._zp assert cosmo.zp == self.cls_kwargs["zp"] << cu.redshift def test_init_zp(self, cosmo_cls, ba): """Test initialization for values of ``zp``.""" # test that it works with units ba.arguments["zp"] = ba.arguments["zp"] << u.one # ensure units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.zp == ba.arguments["zp"] # also without units ba.arguments["zp"] = ba.arguments["zp"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.zp.value == ba.arguments["zp"] # must be dimensionless ba.arguments["zp"] = 10 * u.km with pytest.raises(u.UnitConversionError): cosmo_cls(*ba.args, **ba.kwargs) class TestwpwaCDM(FLRWSubclassTest, ParameterwpTestMixin, ParameterwaTestMixin, ParameterzpTestMixin): """Test :class:`astropy.cosmology.wpwaCDM`.""" def setup_class(self): """Setup for testing.""" super().setup_class(self) self.cls = wpwaCDM self.cls_kwargs.update(wp=-0.9, wa=0.2, zp=0.5) # =============================================================== # Method & Attribute Tests def test_clone_change_param(self, cosmo): """Test method ``.clone()`` changing a(many) Parameter(s).""" super().test_clone_change_param(cosmo) # `w` params c = cosmo.clone(wp=0.1, wa=0.2, zp=14) assert c.wp == 0.1 assert c.wa == 0.2 assert c.zp == 14 for n in (set(cosmo.__parameters__) - {"wp", "wa", "zp"}): v = getattr(c, n) if v is None: assert v is getattr(cosmo, n) else: assert u.allclose(v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1)) # @pytest.mark.parametrize("z", valid_zs) # TODO! recompute comparisons below def test_w(self, cosmo): """Test :meth:`astropy.cosmology.wpwaCDM.w`.""" # super().test_w(cosmo, z) assert u.allclose(cosmo.w(0.5), -0.9) assert u.allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]), [-0.94848485, -0.93333333, -0.9, -0.84666667, -0.82380952, -0.78266667]) def test_repr(self, cosmo_cls, cosmo): """Test method ``.__repr__()``.""" super().test_repr(cosmo_cls, cosmo) expected = ("wpwaCDM(name=\"ABCMeta\", H0=70.0 km / (Mpc s), Om0=0.27," " Ode0=0.73, wp=-0.9, wa=0.2, zp=0.5 redshift, Tcmb0=3.0 K," " Neff=3.04, m_nu=[0. 0. 0.] eV, Ob0=0.03)") assert repr(cosmo) == expected
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Testing :mod:`astropy.cosmology.flrw.lambdacdm`.""" ############################################################################## # IMPORTS # STDLIB # THIRD PARTY import pytest # LOCAL import astropy.units as u from astropy.cosmology import FlatLambdaCDM, LambdaCDM from astropy.cosmology.flrw.lambdacdm import ellipkinc, hyp2f1 from astropy.cosmology.tests.helper import get_redshift_methods from astropy.cosmology.tests.test_core import invalid_zs, valid_zs from astropy.utils.compat.optional_deps import HAS_SCIPY from .test_base import FlatFLRWMixinTest, FLRWSubclassTest ############################################################################## # TESTS ############################################################################## @pytest.mark.skipif(HAS_SCIPY, reason="scipy is installed") def test_optional_deps_functions(): """Test stand-in functions when optional dependencies not installed.""" with pytest.raises(ModuleNotFoundError, match="No module named 'scipy.special'"): ellipkinc() with pytest.raises(ModuleNotFoundError, match="No module named 'scipy.special'"): hyp2f1() ############################################################################## class TestLambdaCDM(FLRWSubclassTest): """Test :class:`astropy.cosmology.LambdaCDM`.""" def setup_class(self): """Setup for testing.""" super().setup_class(self) self.cls = LambdaCDM # =============================================================== # Method & Attribute Tests _FLRW_redshift_methods = (get_redshift_methods(LambdaCDM, include_private=True, include_z2=False) - {"_dS_age"}) # `_dS_age` is removed because it doesn't strictly rely on the value of `z`, # so any input that doesn't trip up ``np.shape`` is "valid" @pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed") @pytest.mark.parametrize("z, exc", invalid_zs) @pytest.mark.parametrize('method', _FLRW_redshift_methods) def test_redshift_method_bad_input(self, cosmo, method, z, exc): """Test all the redshift methods for bad input.""" super().test_redshift_method_bad_input(cosmo, method, z, exc) @pytest.mark.parametrize("z", valid_zs) def test_w(self, cosmo, z): """Test :meth:`astropy.cosmology.LambdaCDM.w`.""" super().test_w(cosmo, z) w = cosmo.w(z) assert u.allclose(w, -1.0) def test_repr(self, cosmo_cls, cosmo): """Test method ``.__repr__()``.""" super().test_repr(cosmo_cls, cosmo) expected = ("LambdaCDM(name=\"ABCMeta\", H0=70.0 km / (Mpc s), Om0=0.27," " Ode0=0.73, Tcmb0=3.0 K, Neff=3.04, m_nu=[0. 0. 0.] 
eV," " Ob0=0.03)") assert repr(cosmo) == expected # ----------------------------------------------------------------------------- class TestFlatLambdaCDM(FlatFLRWMixinTest, TestLambdaCDM): """Test :class:`astropy.cosmology.FlatLambdaCDM`.""" def setup_class(self): """Setup for testing.""" super().setup_class(self) self.cls = FlatLambdaCDM @pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed") @pytest.mark.parametrize("z, exc", invalid_zs) @pytest.mark.parametrize('method', TestLambdaCDM._FLRW_redshift_methods - {"Otot"}) def test_redshift_method_bad_input(self, cosmo, method, z, exc): """Test all the redshift methods for bad input.""" super().test_redshift_method_bad_input(cosmo, method, z, exc) # =============================================================== # Method & Attribute Tests def test_repr(self, cosmo_cls, cosmo): """Test method ``.__repr__()``.""" super().test_repr(cosmo_cls, cosmo) expected = ("FlatLambdaCDM(name=\"ABCMeta\", H0=70.0 km / (Mpc s)," " Om0=0.27, Tcmb0=3.0 K, Neff=3.04, m_nu=[0. 0. 0.] eV," " Ob0=0.03)") assert repr(cosmo) == expected
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Testing :mod:`astropy.cosmology.flrw.base`.""" ############################################################################## # IMPORTS # STDLIB import abc import copy # THIRD PARTY import numpy as np import pytest import astropy.constants as const # LOCAL import astropy.units as u from astropy.cosmology import FLRW, FlatLambdaCDM, LambdaCDM, Planck18 from astropy.cosmology.core import _COSMOLOGY_CLASSES from astropy.cosmology.flrw.base import H0units_to_invs, a_B_c2, critdens_const, quad from astropy.cosmology.parameter import Parameter from astropy.cosmology.tests.helper import get_redshift_methods from astropy.cosmology.tests.test_core import CosmologySubclassTest as CosmologyTest from astropy.cosmology.tests.test_core import (FlatCosmologyMixinTest, ParameterTestMixin, invalid_zs, valid_zs) from astropy.utils.compat.optional_deps import HAS_SCIPY ############################################################################## # SETUP / TEARDOWN class SubFLRW(FLRW): def w(self, z): return super().w(z) ############################################################################## # TESTS ############################################################################## @pytest.mark.skipif(HAS_SCIPY, reason="scipy is installed") def test_optional_deps_functions(): """Test stand-in functions when optional dependencies not installed.""" with pytest.raises(ModuleNotFoundError, match="No module named 'scipy.integrate'"): quad() ############################################################################## class ParameterH0TestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` H0 on a Cosmology. H0 is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. """ def test_H0(self, cosmo_cls, cosmo): """Test Parameter ``H0``.""" unit = u.Unit("km/(s Mpc)") # on the class assert isinstance(cosmo_cls.H0, Parameter) assert "Hubble constant" in cosmo_cls.H0.__doc__ assert cosmo_cls.H0.unit == unit # validation assert cosmo_cls.H0.validate(cosmo, 1) == 1 * unit assert cosmo_cls.H0.validate(cosmo, 10 * unit) == 10 * unit with pytest.raises(ValueError, match="H0 is a non-scalar quantity"): cosmo_cls.H0.validate(cosmo, [1, 2]) # on the instance assert cosmo.H0 is cosmo._H0 assert cosmo.H0 == self._cls_args["H0"] assert isinstance(cosmo.H0, u.Quantity) and cosmo.H0.unit == unit def test_init_H0(self, cosmo_cls, ba): """Test initialization for values of ``H0``.""" # test that it works with units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.H0 == ba.arguments["H0"] # also without units ba.arguments["H0"] = ba.arguments["H0"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.H0.value == ba.arguments["H0"] # fails for non-scalar ba.arguments["H0"] = u.Quantity([70, 100], u.km / u.s / u.Mpc) with pytest.raises(ValueError, match="H0 is a non-scalar quantity"): cosmo_cls(*ba.args, **ba.kwargs) class ParameterOm0TestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` Om0 on a Cosmology. Om0 is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. 
""" def test_Om0(self, cosmo_cls, cosmo): """Test Parameter ``Om0``.""" # on the class assert isinstance(cosmo_cls.Om0, Parameter) assert "Omega matter" in cosmo_cls.Om0.__doc__ # validation assert cosmo_cls.Om0.validate(cosmo, 1) == 1 assert cosmo_cls.Om0.validate(cosmo, 10 * u.one) == 10 with pytest.raises(ValueError, match="Om0 cannot be negative"): cosmo_cls.Om0.validate(cosmo, -1) # on the instance assert cosmo.Om0 is cosmo._Om0 assert cosmo.Om0 == self._cls_args["Om0"] assert isinstance(cosmo.Om0, float) def test_init_Om0(self, cosmo_cls, ba): """Test initialization for values of ``Om0``.""" # test that it works with units ba.arguments["Om0"] = ba.arguments["Om0"] << u.one # ensure units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Om0 == ba.arguments["Om0"] # also without units ba.arguments["Om0"] = ba.arguments["Om0"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Om0 == ba.arguments["Om0"] # fails for negative numbers ba.arguments["Om0"] = -0.27 with pytest.raises(ValueError, match="Om0 cannot be negative."): cosmo_cls(*ba.args, **ba.kwargs) class ParameterOde0TestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` Ode0 on a Cosmology. Ode0 is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. """ def test_Parameter_Ode0(self, cosmo_cls): """Test Parameter ``Ode0`` on the class.""" assert isinstance(cosmo_cls.Ode0, Parameter) assert "Omega dark energy" in cosmo_cls.Ode0.__doc__ def test_Parameter_Ode0_validation(self, cosmo_cls, cosmo): """Test Parameter ``Ode0`` validation.""" assert cosmo_cls.Ode0.validate(cosmo, 1.1) == 1.1 assert cosmo_cls.Ode0.validate(cosmo, 10 * u.one) == 10.0 with pytest.raises(TypeError, match="only dimensionless"): cosmo_cls.Ode0.validate(cosmo, 10 * u.km) def test_Ode0(self, cosmo): """Test Parameter ``Ode0`` validation.""" # if Ode0 is a parameter, test its value assert cosmo.Ode0 is cosmo._Ode0 assert cosmo.Ode0 == self._cls_args["Ode0"] assert isinstance(cosmo.Ode0, float) def test_init_Ode0(self, cosmo_cls, ba): """Test initialization for values of ``Ode0``.""" # test that it works with units ba.arguments["Ode0"] = ba.arguments["Ode0"] << u.one # ensure units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Ode0 == ba.arguments["Ode0"] # also without units ba.arguments["Ode0"] = ba.arguments["Ode0"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Ode0 == ba.arguments["Ode0"] # Setting param to 0 respects that. Note this test uses ``Ode()``. ba.arguments["Ode0"] = 0.0 cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert u.allclose(cosmo.Ode([0, 1, 2, 3]), [0, 0, 0, 0]) assert u.allclose(cosmo.Ode(1), 0) # Must be dimensionless or have no units. Errors otherwise. ba.arguments["Ode0"] = 10 * u.km with pytest.raises(TypeError, match="only dimensionless"): cosmo_cls(*ba.args, **ba.kwargs) class ParameterTcmb0TestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` Tcmb0 on a Cosmology. Tcmb0 is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. 
""" def test_Tcmb0(self, cosmo_cls, cosmo): """Test Parameter ``Tcmb0``.""" # on the class assert isinstance(cosmo_cls.Tcmb0, Parameter) assert "Temperature of the CMB" in cosmo_cls.Tcmb0.__doc__ assert cosmo_cls.Tcmb0.unit == u.K # validation assert cosmo_cls.Tcmb0.validate(cosmo, 1) == 1 * u.K assert cosmo_cls.Tcmb0.validate(cosmo, 10 * u.K) == 10 * u.K with pytest.raises(ValueError, match="Tcmb0 is a non-scalar quantity"): cosmo_cls.Tcmb0.validate(cosmo, [1, 2]) # on the instance assert cosmo.Tcmb0 is cosmo._Tcmb0 assert cosmo.Tcmb0 == self.cls_kwargs["Tcmb0"] assert isinstance(cosmo.Tcmb0, u.Quantity) and cosmo.Tcmb0.unit == u.K def test_init_Tcmb0(self, cosmo_cls, ba): """Test initialization for values of ``Tcmb0``.""" # test that it works with units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Tcmb0 == ba.arguments["Tcmb0"] # also without units ba.arguments["Tcmb0"] = ba.arguments["Tcmb0"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Tcmb0.value == ba.arguments["Tcmb0"] # must be a scalar ba.arguments["Tcmb0"] = u.Quantity([0.0, 2], u.K) with pytest.raises(ValueError, match="Tcmb0 is a non-scalar quantity"): cosmo_cls(*ba.args, **ba.kwargs) class ParameterNeffTestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` Neff on a Cosmology. Neff is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. """ def test_Neff(self, cosmo_cls, cosmo): """Test Parameter ``Neff``.""" # on the class assert isinstance(cosmo_cls.Neff, Parameter) assert "Number of effective neutrino species" in cosmo_cls.Neff.__doc__ # validation assert cosmo_cls.Neff.validate(cosmo, 1) == 1 assert cosmo_cls.Neff.validate(cosmo, 10 * u.one) == 10 with pytest.raises(ValueError, match="Neff cannot be negative"): cosmo_cls.Neff.validate(cosmo, -1) # on the instance assert cosmo.Neff is cosmo._Neff assert cosmo.Neff == self.cls_kwargs.get("Neff", 3.04) assert isinstance(cosmo.Neff, float) def test_init_Neff(self, cosmo_cls, ba): """Test initialization for values of ``Neff``.""" # test that it works with units ba.arguments["Neff"] = ba.arguments["Neff"] << u.one # ensure units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Neff == ba.arguments["Neff"] # also without units ba.arguments["Neff"] = ba.arguments["Neff"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Neff == ba.arguments["Neff"] ba.arguments["Neff"] = -1 with pytest.raises(ValueError): cosmo_cls(*ba.args, **ba.kwargs) class Parameterm_nuTestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` m_nu on a Cosmology. m_nu is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. 
""" def test_m_nu(self, cosmo_cls, cosmo): """Test Parameter ``m_nu``.""" # on the class assert isinstance(cosmo_cls.m_nu, Parameter) assert "Mass of neutrino species" in cosmo_cls.m_nu.__doc__ assert cosmo_cls.m_nu.unit == u.eV assert cosmo_cls.m_nu.equivalencies == u.mass_energy() assert cosmo_cls.m_nu.format_spec == "" # on the instance # assert cosmo.m_nu is cosmo._m_nu assert u.allclose(cosmo.m_nu, [0.0, 0.0, 0.0] * u.eV) # set differently depending on the other inputs if cosmo.Tnu0.value == 0: assert cosmo.m_nu is None elif not cosmo._massivenu: # only massless assert u.allclose(cosmo.m_nu, 0 * u.eV) elif self._nmasslessnu == 0: # only massive assert cosmo.m_nu == cosmo._massivenu_mass else: # a mix -- the most complicated case assert u.allclose(cosmo.m_nu[:self._nmasslessnu], 0 * u.eV) assert u.allclose(cosmo.m_nu[self._nmasslessnu], cosmo._massivenu_mass) def test_init_m_nu(self, cosmo_cls, ba): """Test initialization for values of ``m_nu``. Note this requires the class to have a property ``has_massive_nu``. """ # Test that it works when m_nu has units. cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert np.all(cosmo.m_nu == ba.arguments["m_nu"]) # (& checks len, unit) assert not cosmo.has_massive_nu assert cosmo.m_nu.unit == u.eV # explicitly check unit once. # And it works when m_nu doesn't have units. ba.arguments["m_nu"] = ba.arguments["m_nu"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert np.all(cosmo.m_nu.value == ba.arguments["m_nu"]) assert not cosmo.has_massive_nu # A negative m_nu raises an exception. tba = copy.copy(ba) tba.arguments["m_nu"] = u.Quantity([-0.3, 0.2, 0.1], u.eV) with pytest.raises(ValueError, match="invalid"): cosmo_cls(*tba.args, **tba.kwargs) def test_init_m_nu_and_Neff(self, cosmo_cls, ba): """Test initialization for values of ``m_nu`` and ``Neff``. Note this test requires ``Neff`` as constructor input, and a property ``has_massive_nu``. """ # Mismatch with Neff = wrong number of neutrinos tba = copy.copy(ba) tba.arguments["Neff"] = 4.05 tba.arguments["m_nu"] = u.Quantity([0.15, 0.2, 0.1], u.eV) with pytest.raises(ValueError, match="unexpected number of neutrino"): cosmo_cls(*tba.args, **tba.kwargs) # No neutrinos, but Neff tba.arguments["m_nu"] = 0 cosmo = cosmo_cls(*tba.args, **tba.kwargs) assert not cosmo.has_massive_nu assert len(cosmo.m_nu) == 4 assert cosmo.m_nu.unit == u.eV assert u.allclose(cosmo.m_nu, 0 * u.eV) # TODO! move this test when create ``test_nu_relative_density`` assert u.allclose(cosmo.nu_relative_density(1.0), 0.22710731766 * 4.05, rtol=1e-6) # All massive neutrinos case, len from Neff tba.arguments["m_nu"] = 0.1 * u.eV cosmo = cosmo_cls(*tba.args, **tba.kwargs) assert cosmo.has_massive_nu assert len(cosmo.m_nu) == 4 assert cosmo.m_nu.unit == u.eV assert u.allclose(cosmo.m_nu, [0.1, 0.1, 0.1, 0.1] * u.eV) def test_init_m_nu_override_by_Tcmb0(self, cosmo_cls, ba): """Test initialization for values of ``m_nu``. Note this test requires ``Tcmb0`` as constructor input, and a property ``has_massive_nu``. """ # If Neff = 0, m_nu is None. tba = copy.copy(ba) tba.arguments["Neff"] = 0 cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.m_nu is None assert not cosmo.has_massive_nu # If Tcmb0 = 0, m_nu is None tba = copy.copy(ba) tba.arguments["Tcmb0"] = 0 cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.m_nu is None assert not cosmo.has_massive_nu class ParameterOb0TestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` Ob0 on a Cosmology. 
    Ob0 is a descriptor, which is tested by mixin, here with ``TestFLRW``.
    These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
    args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
    """

    def test_Ob0(self, cosmo_cls, cosmo):
        """Test Parameter ``Ob0``."""
        # on the class
        assert isinstance(cosmo_cls.Ob0, Parameter)
        assert "Omega baryon;" in cosmo_cls.Ob0.__doc__

        # validation
        assert cosmo_cls.Ob0.validate(cosmo, None) is None
        assert cosmo_cls.Ob0.validate(cosmo, 0.1) == 0.1
        assert cosmo_cls.Ob0.validate(cosmo, 0.1 * u.one) == 0.1
        with pytest.raises(ValueError, match="Ob0 cannot be negative"):
            cosmo_cls.Ob0.validate(cosmo, -1)
        with pytest.raises(ValueError, match="baryonic density can not be larger"):
            cosmo_cls.Ob0.validate(cosmo, cosmo.Om0 + 1)

        # on the instance
        assert cosmo.Ob0 is cosmo._Ob0
        assert cosmo.Ob0 == 0.03

    def test_init_Ob0(self, cosmo_cls, ba):
        """Test initialization for values of ``Ob0``."""
        # test that it works with units
        assert isinstance(ba.arguments["Ob0"], u.Quantity)
        cosmo = cosmo_cls(*ba.args, **ba.kwargs)
        assert cosmo.Ob0 == ba.arguments["Ob0"]

        # also without units
        ba.arguments["Ob0"] = ba.arguments["Ob0"].value  # strip units
        cosmo = cosmo_cls(*ba.args, **ba.kwargs)
        assert cosmo.Ob0 == ba.arguments["Ob0"]

        # Setting param to 0 respects that. Note this test uses ``Ob()``.
        ba.arguments["Ob0"] = 0.0
        cosmo = cosmo_cls(*ba.args, **ba.kwargs)
        assert cosmo.Ob0 == 0.0
        if not self.abstract_w:
            assert u.allclose(cosmo.Ob(1), 0)
            assert u.allclose(cosmo.Ob([0, 1, 2, 3]), [0, 0, 0, 0])

        # Negative Ob0 errors
        tba = copy.copy(ba)
        tba.arguments["Ob0"] = -0.04
        with pytest.raises(ValueError, match="Ob0 cannot be negative"):
            cosmo_cls(*tba.args, **tba.kwargs)

        # Ob0 > Om0 errors
        tba.arguments["Ob0"] = tba.arguments["Om0"] + 0.1
        with pytest.raises(ValueError, match="baryonic density can not be larger"):
            cosmo_cls(*tba.args, **tba.kwargs)

        # No baryons specified means baryon-specific methods fail.
        tba = copy.copy(ba)
        tba.arguments.pop("Ob0", None)
        cosmo = cosmo_cls(*tba.args, **tba.kwargs)
        with pytest.raises(ValueError):
            cosmo.Ob(1)

        # also means DM fraction is undefined
        with pytest.raises(ValueError):
            cosmo.Odm(1)

        # The default value is None
        assert cosmo_cls._init_signature.parameters["Ob0"].default is None


class TestFLRW(CosmologyTest,
               ParameterH0TestMixin, ParameterOm0TestMixin, ParameterOde0TestMixin,
               ParameterTcmb0TestMixin, ParameterNeffTestMixin, Parameterm_nuTestMixin,
               ParameterOb0TestMixin):
    """Test :class:`astropy.cosmology.FLRW`."""

    abstract_w = True

    def setup_class(self):
        """
        Setup for testing.
        FLRW is abstract, so tests are done on a subclass.
        """
        # make sure SubCosmology is known
        _COSMOLOGY_CLASSES["SubFLRW"] = SubFLRW

        self.cls = SubFLRW
        self._cls_args = dict(H0=70 * u.km / u.s / u.Mpc, Om0=0.27 * u.one,
                              Ode0=0.73 * u.one)
        self.cls_kwargs = dict(Tcmb0=3.0 * u.K, Ob0=0.03 * u.one,
                               name=self.__class__.__name__, meta={"a": "b"})

    def teardown_class(self):
        super().teardown_class(self)
        _COSMOLOGY_CLASSES.pop("SubFLRW", None)

    @pytest.fixture(scope="class")
    def nonflatcosmo(self):
        """A non-flat cosmology used in equivalence tests."""
        return LambdaCDM(70, 0.4, 0.8)

    # ===============================================================
    # Method & Attribute Tests

    def test_init(self, cosmo_cls):
        """Test initialization."""
        super().test_init(cosmo_cls)

        # TODO! tests for initializing calculated values, e.g. `h`
        # TODO! transfer tests for initializing neutrinos

    def test_init_Tcmb0_zeroing(self, cosmo_cls, ba):
        """Test if setting Tcmb0 parameter to 0 influences other parameters.

        TODO: consider moving this test to ``FLRWSubclassTest``
        """
        ba.arguments["Tcmb0"] = 0.0
        cosmo = cosmo_cls(*ba.args, **ba.kwargs)

        assert cosmo.Ogamma0 == 0.0
        assert cosmo.Onu0 == 0.0

        if not self.abstract_w:
            assert u.allclose(cosmo.Ogamma(1.5), [0, 0, 0, 0])
            assert u.allclose(cosmo.Ogamma([0, 1, 2, 3]), [0, 0, 0, 0])
            assert u.allclose(cosmo.Onu(1.5), [0, 0, 0, 0])
            assert u.allclose(cosmo.Onu([0, 1, 2, 3]), [0, 0, 0, 0])

    # ---------------------------------------------------------------
    # Properties

    def test_Odm0(self, cosmo_cls, cosmo):
        """Test property ``Odm0``."""
        # on the class
        assert isinstance(cosmo_cls.Odm0, property)
        assert cosmo_cls.Odm0.fset is None  # immutable

        # on the instance
        assert cosmo.Odm0 is cosmo._Odm0
        # Odm0 can be None, if Ob0 is None. Otherwise DM = matter - baryons.
        if cosmo.Ob0 is None:
            assert cosmo.Odm0 is None
        else:
            assert np.allclose(cosmo.Odm0, cosmo.Om0 - cosmo.Ob0)

    def test_Ok0(self, cosmo_cls, cosmo):
        """Test property ``Ok0``."""
        # on the class
        assert isinstance(cosmo_cls.Ok0, property)
        assert cosmo_cls.Ok0.fset is None  # immutable

        # on the instance
        assert cosmo.Ok0 is cosmo._Ok0
        assert np.allclose(cosmo.Ok0, 1.0 - (cosmo.Om0 + cosmo.Ode0 +
                                             cosmo.Ogamma0 + cosmo.Onu0))

    def test_is_flat(self, cosmo_cls, cosmo):
        """Test property ``is_flat``."""
        # on the class
        assert isinstance(cosmo_cls.is_flat, property)
        assert cosmo_cls.is_flat.fset is None  # immutable

        # on the instance
        assert isinstance(cosmo.is_flat, bool)
        assert cosmo.is_flat is bool((cosmo.Ok0 == 0.0) and (cosmo.Otot0 == 1.0))

    def test_Tnu0(self, cosmo_cls, cosmo):
        """Test property ``Tnu0``."""
        # on the class
        assert isinstance(cosmo_cls.Tnu0, property)
        assert cosmo_cls.Tnu0.fset is None  # immutable

        # on the instance
        assert cosmo.Tnu0 is cosmo._Tnu0
        assert cosmo.Tnu0.unit == u.K
        assert u.allclose(cosmo.Tnu0, 0.7137658555036082 * cosmo.Tcmb0, rtol=1e-5)

    def test_has_massive_nu(self, cosmo_cls, cosmo):
        """Test property ``has_massive_nu``."""
        # on the class
        assert isinstance(cosmo_cls.has_massive_nu, property)
        assert cosmo_cls.has_massive_nu.fset is None  # immutable

        # on the instance
        if cosmo.Tnu0 == 0:
            assert cosmo.has_massive_nu is False
        else:
            assert cosmo.has_massive_nu is cosmo._massivenu

    def test_h(self, cosmo_cls, cosmo):
        """Test property ``h``."""
        # on the class
        assert isinstance(cosmo_cls.h, property)
        assert cosmo_cls.h.fset is None  # immutable

        # on the instance
        assert cosmo.h is cosmo._h
        assert np.allclose(cosmo.h, cosmo.H0.value / 100.0)

    def test_hubble_time(self, cosmo_cls, cosmo):
        """Test property ``hubble_time``."""
        # on the class
        assert isinstance(cosmo_cls.hubble_time, property)
        assert cosmo_cls.hubble_time.fset is None  # immutable

        # on the instance
        assert cosmo.hubble_time is cosmo._hubble_time
        assert u.allclose(cosmo.hubble_time, (1 / cosmo.H0) << u.Gyr)

    def test_hubble_distance(self, cosmo_cls, cosmo):
        """Test property ``hubble_distance``."""
        # on the class
        assert isinstance(cosmo_cls.hubble_distance, property)
        assert cosmo_cls.hubble_distance.fset is None  # immutable

        # on the instance
        assert cosmo.hubble_distance is cosmo._hubble_distance
        assert cosmo.hubble_distance == (const.c / cosmo._H0).to(u.Mpc)

    def test_critical_density0(self, cosmo_cls, cosmo):
        """Test property ``critical_density0``."""
        # on the class
        assert isinstance(cosmo_cls.critical_density0, property)
        assert cosmo_cls.critical_density0.fset is None  # immutable

        # on the instance
        assert cosmo.critical_density0 is cosmo._critical_density0
        assert cosmo.critical_density0.unit == u.g / u.cm ** 3

        cd0value = critdens_const * (cosmo.H0.value * H0units_to_invs) ** 2
        assert cosmo.critical_density0.value == cd0value

    def test_Ogamma0(self, cosmo_cls, cosmo):
        """Test property ``Ogamma0``."""
        # on the class
        assert isinstance(cosmo_cls.Ogamma0, property)
        assert cosmo_cls.Ogamma0.fset is None  # immutable

        # on the instance
        assert cosmo.Ogamma0 is cosmo._Ogamma0
        # Ogamma \propto T^4 / rho_critical
        expect = a_B_c2 * cosmo.Tcmb0.value ** 4 / cosmo.critical_density0.value
        assert np.allclose(cosmo.Ogamma0, expect)
        # check absolute equality to 0 if Tcmb0 is 0
        if cosmo.Tcmb0 == 0:
            assert cosmo.Ogamma0 == 0

    def test_Onu0(self, cosmo_cls, cosmo):
        """Test property ``Onu0``."""
        # on the class
        assert isinstance(cosmo_cls.Onu0, property)
        assert cosmo_cls.Onu0.fset is None  # immutable

        # on the instance
        assert cosmo.Onu0 is cosmo._Onu0
        # neutrino temperature <= photon temperature since the neutrinos
        # decouple first.
        if cosmo.has_massive_nu:  # Tcmb0 > 0 & has massive
            # check the expected formula
            assert cosmo.Onu0 == cosmo.Ogamma0 * cosmo.nu_relative_density(0)
            # a sanity check on the ratio of neutrinos to photons
            # technically it could be 1, but not for any of the tested cases.
            assert cosmo.nu_relative_density(0) <= 1
        elif cosmo.Tcmb0 == 0:
            assert cosmo.Onu0 == 0
        else:
            # check the expected formula
            assert cosmo.Onu0 == 0.22710731766 * cosmo._Neff * cosmo.Ogamma0
            # and check compatibility with nu_relative_density
            assert np.allclose(cosmo.nu_relative_density(0),
                               0.22710731766 * cosmo._Neff)

    def test_Otot0(self, cosmo):
        """Test :attr:`astropy.cosmology.FLRW.Otot0`."""
        assert cosmo.Otot0 == (cosmo.Om0 + cosmo.Ogamma0 + cosmo.Onu0 +
                               cosmo.Ode0 + cosmo.Ok0)

    # ---------------------------------------------------------------
    # Methods

    def test_w(self, cosmo):
        """Test abstract :meth:`astropy.cosmology.FLRW.w`."""
        with pytest.raises(NotImplementedError, match="not implemented"):
            cosmo.w(1)

    def test_Otot(self, cosmo):
        """Test :meth:`astropy.cosmology.FLRW.Otot`."""
        exception = NotImplementedError if HAS_SCIPY else ModuleNotFoundError
        with pytest.raises(exception):
            assert cosmo.Otot(1)

    def test_efunc_vs_invefunc(self, cosmo):
        """
        Test that efunc and inv_efunc give inverse values.
        Here they just fail b/c no ``w(z)`` or no scipy.
""" exception = NotImplementedError if HAS_SCIPY else ModuleNotFoundError with pytest.raises(exception): cosmo.efunc(0.5) with pytest.raises(exception): cosmo.inv_efunc(0.5) # --------------------------------------------------------------- # from Cosmology def test_clone_change_param(self, cosmo): """Test method ``.clone()`` changing a(many) Parameter(s).""" super().test_clone_change_param(cosmo) # don't change any values kwargs = cosmo._init_arguments.copy() kwargs.pop("name", None) # make sure not setting name c = cosmo.clone(**kwargs) assert c.__class__ == cosmo.__class__ assert c.name == cosmo.name + " (modified)" assert c.is_equivalent(cosmo) # change ``H0`` # Note that H0 affects Ode0 because it changes Ogamma0 c = cosmo.clone(H0=100) assert c.__class__ == cosmo.__class__ assert c.name == cosmo.name + " (modified)" assert c.H0.value == 100 for n in (set(cosmo.__parameters__) - {"H0"}): v = getattr(c, n) if v is None: assert v is getattr(cosmo, n) else: assert u.allclose(v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1)) assert not u.allclose(c.Ogamma0, cosmo.Ogamma0) assert not u.allclose(c.Onu0, cosmo.Onu0) # change multiple things c = cosmo.clone(name="new name", H0=100, Tcmb0=2.8, meta=dict(zz="tops")) assert c.__class__ == cosmo.__class__ assert c.name == "new name" assert c.H0.value == 100 assert c.Tcmb0.value == 2.8 assert c.meta == {**cosmo.meta, **dict(zz="tops")} for n in (set(cosmo.__parameters__) - {"H0", "Tcmb0"}): v = getattr(c, n) if v is None: assert v is getattr(cosmo, n) else: assert u.allclose(v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1)) assert not u.allclose(c.Ogamma0, cosmo.Ogamma0) assert not u.allclose(c.Onu0, cosmo.Onu0) assert not u.allclose(c.Tcmb0.value, cosmo.Tcmb0.value) def test_is_equivalent(self, cosmo): """Test :meth:`astropy.cosmology.FLRW.is_equivalent`.""" super().test_is_equivalent(cosmo) # pass to CosmologySubclassTest # test against a FlatFLRWMixin # case (3) in FLRW.is_equivalent if isinstance(cosmo, FlatLambdaCDM): assert cosmo.is_equivalent(Planck18) assert Planck18.is_equivalent(cosmo) else: assert not cosmo.is_equivalent(Planck18) assert not Planck18.is_equivalent(cosmo) class FLRWSubclassTest(TestFLRW): """ Test subclasses of :class:`astropy.cosmology.FLRW`. This is broken away from ``TestFLRW``, because ``FLRW`` is an ABC and subclasses must override some methods. """ abstract_w = False @abc.abstractmethod def setup_class(self): """Setup for testing.""" super().setup_class(self) # =============================================================== # Method & Attribute Tests _FLRW_redshift_methods = get_redshift_methods(FLRW, include_private=True, include_z2=False) @pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed") @pytest.mark.parametrize("z, exc", invalid_zs) @pytest.mark.parametrize('method', _FLRW_redshift_methods) def test_redshift_method_bad_input(self, cosmo, method, z, exc): """Test all the redshift methods for bad input.""" with pytest.raises(exc): getattr(cosmo, method)(z) @pytest.mark.parametrize("z", valid_zs) @abc.abstractmethod def test_w(self, cosmo, z): """Test :meth:`astropy.cosmology.FLRW.w`. Since ``w`` is abstract, each test class needs to define further tests. 
""" # super().test_w(cosmo, z) # NOT b/c abstract `w(z)` w = cosmo.w(z) assert np.shape(w) == np.shape(z) # test same shape assert u.Quantity(w).unit == u.one # test no units or dimensionless # ------------------------------------------- @pytest.mark.parametrize("z", valid_zs) def test_Otot(self, cosmo, z): """Test :meth:`astropy.cosmology.FLRW.Otot`.""" # super().test_Otot(cosmo) # NOT b/c abstract `w(z)` assert np.allclose( cosmo.Otot(z), cosmo.Om(z) + cosmo.Ogamma(z) + cosmo.Onu(z) + cosmo.Ode(z) + cosmo.Ok(z)) # --------------------------------------------------------------- def test_efunc_vs_invefunc(self, cosmo): """Test that ``efunc`` and ``inv_efunc`` give inverse values. Note that the test doesn't need scipy because it doesn't need to call ``de_density_scale``. """ # super().test_efunc_vs_invefunc(cosmo) # NOT b/c abstract `w(z)` z0 = 0.5 z = np.array([0.5, 1.0, 2.0, 5.0]) assert np.allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0)) assert np.allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z)) # ----------------------------------------------------------------------------- class ParameterFlatOde0TestMixin(ParameterOde0TestMixin): """Tests for `astropy.cosmology.Parameter` Ode0 on a flat Cosmology. This will augment or override some tests in ``ParameterOde0TestMixin``. Ode0 is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. """ def test_Parameter_Ode0(self, cosmo_cls): """Test Parameter ``Ode0`` on the class.""" super().test_Parameter_Ode0(cosmo_cls) assert cosmo_cls.Ode0.derived in (True, np.True_) def test_Ode0(self, cosmo): """Test no-longer-Parameter ``Ode0``.""" assert cosmo.Ode0 is cosmo._Ode0 assert cosmo.Ode0 == 1.0 - (cosmo.Om0 + cosmo.Ogamma0 + cosmo.Onu0) def test_init_Ode0(self, cosmo_cls, ba): """Test initialization for values of ``Ode0``.""" cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Ode0 == 1.0 - (cosmo.Om0 + cosmo.Ogamma0 + cosmo.Onu0 + cosmo.Ok0) # Ode0 is not in the signature with pytest.raises(TypeError, match="Ode0"): cosmo_cls(*ba.args, **ba.kwargs, Ode0=1) class FlatFLRWMixinTest(FlatCosmologyMixinTest, ParameterFlatOde0TestMixin): """Tests for :class:`astropy.cosmology.FlatFLRWMixin` subclasses. E.g to use this class:: class TestFlatSomeFLRW(FlatFLRWMixinTest, TestSomeFLRW): ... """ def setup_class(self): """Setup for testing. Set up as for regular FLRW test class, but remove dark energy component since flat cosmologies are forbidden Ode0 as an argument, see ``test_init_subclass``. 
""" super().setup_class(self) self._cls_args.pop("Ode0") # =============================================================== # Method & Attribute Tests # --------------------------------------------------------------- # class-level def test_init_subclass(self, cosmo_cls): """Test initializing subclass, mostly that can't have Ode0 in init.""" super().test_init_subclass(cosmo_cls) with pytest.raises(TypeError, match="subclasses of"): class HASOde0SubClass(cosmo_cls): def __init__(self, Ode0): pass _COSMOLOGY_CLASSES.pop(HASOde0SubClass.__qualname__, None) # --------------------------------------------------------------- # instance-level def test_init(self, cosmo_cls): super().test_init(cosmo_cls) cosmo = cosmo_cls(*self.cls_args, **self.cls_kwargs) assert cosmo._Ok0 == 0.0 assert cosmo._Ode0 == 1.0 - (cosmo._Om0 + cosmo._Ogamma0 + cosmo._Onu0 + cosmo._Ok0) def test_Ok0(self, cosmo_cls, cosmo): """Test property ``Ok0``.""" super().test_Ok0(cosmo_cls, cosmo) # for flat cosmologies, Ok0 is not *close* to 0, it *is* 0 assert cosmo.Ok0 == 0.0 def test_Otot0(self, cosmo): """Test :attr:`astropy.cosmology.FLRW.Otot0`. Should always be 1.""" super().test_Otot0(cosmo) # for flat cosmologies, Otot0 is not *close* to 1, it *is* 1 assert cosmo.Otot0 == 1.0 @pytest.mark.parametrize("z", valid_zs) def test_Otot(self, cosmo, z): """Test :meth:`astropy.cosmology.FLRW.Otot`. Should always be 1.""" super().test_Otot(cosmo, z) # for flat cosmologies, Otot is 1, within precision. assert u.allclose(cosmo.Otot(z), 1.0) @pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed") @pytest.mark.parametrize("z, exc", invalid_zs) @pytest.mark.parametrize('method', FLRWSubclassTest._FLRW_redshift_methods - {"Otot"}) def test_redshift_method_bad_input(self, cosmo, method, z, exc): """Test all the redshift methods for bad input.""" super().test_redshift_method_bad_input(cosmo, method, z, exc) # --------------------------------------------------------------- def test_is_equivalent(self, cosmo, nonflatcosmo): """Test :meth:`astropy.cosmology.FLRW.is_equivalent`.""" super().test_is_equivalent(cosmo) # pass to TestFLRW # against non-flat Cosmology assert not cosmo.is_equivalent(nonflatcosmo) assert not nonflatcosmo.is_equivalent(cosmo) # non-flat version of class nonflat_cosmo_cls = cosmo.__class__.mro()[3] # keys check in `test_is_equivalent_nonflat_class_different_params` # non-flat nonflat = nonflat_cosmo_cls(*self.cls_args, Ode0=0.9, **self.cls_kwargs) assert not nonflat.is_equivalent(cosmo) assert not cosmo.is_equivalent(nonflat) # flat, but not FlatFLRWMixin flat = nonflat_cosmo_cls(*self.cls_args, Ode0=1.0 - cosmo.Om0 - cosmo.Ogamma0 - cosmo.Onu0, **self.cls_kwargs) flat._Ok0 = 0.0 assert flat.is_equivalent(cosmo) assert cosmo.is_equivalent(flat) def test_repr(self, cosmo_cls, cosmo): """ Test method ``.__repr__()``. Skip non-flat superclass test. e.g. `TestFlatLambdaCDDM` -> `FlatFLRWMixinTest` vs `TestFlatLambdaCDDM` -> `TestLambdaCDDM` -> `FlatFLRWMixinTest` """ FLRWSubclassTest.test_repr(self, cosmo_cls, cosmo) # test eliminated Ode0 from parameters assert "Ode0" not in repr(cosmo)
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Testing :mod:`astropy.cosmology.flrw.w0wzcdm`.""" ############################################################################## # IMPORTS # STDLIB # THIRD PARTY import pytest # LOCAL import astropy.units as u from astropy.cosmology import w0wzCDM from astropy.cosmology.parameter import Parameter from astropy.cosmology.tests.test_core import ParameterTestMixin from .test_base import FLRWSubclassTest from .test_w0cdm import Parameterw0TestMixin ############################################################################## # TESTS ############################################################################## class ParameterwzTestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` wz on a Cosmology. wz is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. """ def test_wz(self, cosmo_cls, cosmo): """Test Parameter ``wz``.""" # on the class assert isinstance(cosmo_cls.wz, Parameter) assert "Derivative of the dark energy" in cosmo_cls.wz.__doc__ assert cosmo_cls.wz.unit is None # on the instance assert cosmo.wz is cosmo._wz assert cosmo.wz == self.cls_kwargs["wz"] def test_init_wz(self, cosmo_cls, ba): """Test initialization for values of ``wz``.""" # test that it works with units ba.arguments["wz"] = ba.arguments["wz"] << u.one # ensure units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.wz == ba.arguments["wz"] # also without units ba.arguments["wz"] = ba.arguments["wz"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.wz == ba.arguments["wz"] # must be dimensionless ba.arguments["wz"] = 10 * u.km with pytest.raises(TypeError): cosmo_cls(*ba.args, **ba.kwargs) class Testw0wzCDM(FLRWSubclassTest, Parameterw0TestMixin, ParameterwzTestMixin): """Test :class:`astropy.cosmology.w0wzCDM`.""" def setup_class(self): """Setup for testing.""" super().setup_class(self) self.cls = w0wzCDM self.cls_kwargs.update(w0=-1, wz=0.5) # =============================================================== # Method & Attribute Tests def test_clone_change_param(self, cosmo): """Test method ``.clone()`` changing a(many) Parameter(s).""" super().test_clone_change_param(cosmo) # `w` params c = cosmo.clone(w0=0.1, wz=0.2) assert c.w0 == 0.1 assert c.wz == 0.2 for n in (set(cosmo.__parameters__) - {"w0", "wz"}): v = getattr(c, n) if v is None: assert v is getattr(cosmo, n) else: assert u.allclose(v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1)) # @pytest.mark.parametrize("z", valid_zs) # TODO! recompute comparisons below def test_w(self, cosmo): """Test :meth:`astropy.cosmology.w0wzCDM.w`.""" # super().test_w(cosmo, z) assert u.allclose(cosmo.w(1.0), -0.5) assert u.allclose(cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]), [-1.0, -0.75, -0.5, -0.25, 0.15]) def test_repr(self, cosmo_cls, cosmo): """Test method ``.__repr__()``.""" super().test_repr(cosmo_cls, cosmo) expected = ("w0wzCDM(name=\"ABCMeta\", H0=70.0 km / (Mpc s), Om0=0.27," " Ode0=0.73, w0=-1.0, wz=0.5, Tcmb0=3.0 K, Neff=3.04," " m_nu=[0. 0. 0.] eV, Ob0=0.03)") assert repr(cosmo) == expected
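

# A hand-check (not part of the astropy test suite) of the comparison values
# in ``test_w`` above, from the linear form w(z) = w0 + wz * z used by
# w0wzCDM, with the fixture w0=-1, wz=0.5.
if __name__ == "__main__":
    w0, wz = -1.0, 0.5
    for z in (0.0, 0.5, 1.0, 1.5, 2.3):
        print(z, w0 + wz * z)  # -1.0, -0.75, -0.5, -0.25, 0.15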
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Testing :mod:`astropy.cosmology.flrw.w0wacdm`.""" ############################################################################## # IMPORTS # STDLIB # THIRD PARTY import pytest # LOCAL import astropy.units as u from astropy.cosmology import Flatw0waCDM, w0waCDM from astropy.cosmology.parameter import Parameter from astropy.cosmology.tests.test_core import ParameterTestMixin from .test_base import FlatFLRWMixinTest, FLRWSubclassTest from .test_w0cdm import Parameterw0TestMixin ############################################################################## # TESTS ############################################################################## class ParameterwaTestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` wa on a Cosmology. wa is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. """ def test_wa(self, cosmo_cls, cosmo): """Test Parameter ``wa``.""" # on the class assert isinstance(cosmo_cls.wa, Parameter) assert "Negative derivative" in cosmo_cls.wa.__doc__ assert cosmo_cls.wa.unit is None # on the instance assert cosmo.wa is cosmo._wa assert cosmo.wa == self.cls_kwargs["wa"] def test_init_wa(self, cosmo_cls, ba): """Test initialization for values of ``wa``.""" # test that it works with units ba.arguments["wa"] = ba.arguments["wa"] << u.one # ensure units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.wa == ba.arguments["wa"] # also without units ba.arguments["wa"] = ba.arguments["wa"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.wa == ba.arguments["wa"] # must be dimensionless ba.arguments["wa"] = 10 * u.km with pytest.raises(TypeError): cosmo_cls(*ba.args, **ba.kwargs) class Testw0waCDM(FLRWSubclassTest, Parameterw0TestMixin, ParameterwaTestMixin): """Test :class:`astropy.cosmology.w0waCDM`.""" def setup_class(self): """Setup for testing.""" super().setup_class(self) self.cls = w0waCDM self.cls_kwargs.update(w0=-1, wa=-0.5) # =============================================================== # Method & Attribute Tests def test_clone_change_param(self, cosmo): """Test method ``.clone()`` changing a(many) Parameter(s).""" super().test_clone_change_param(cosmo) # `w` params c = cosmo.clone(w0=0.1, wa=0.2) assert c.w0 == 0.1 assert c.wa == 0.2 for n in (set(cosmo.__parameters__) - {"w0", "wa"}): v = getattr(c, n) if v is None: assert v is getattr(cosmo, n) else: assert u.allclose(v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1)) # @pytest.mark.parametrize("z", valid_zs) # TODO! recompute comparisons below def test_w(self, cosmo): """Test :meth:`astropy.cosmology.w0waCDM.w`.""" # super().test_w(cosmo, z) assert u.allclose(cosmo.w(1.0), -1.25) assert u.allclose(cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]), [-1, -1.16666667, -1.25, -1.3, -1.34848485]) def test_repr(self, cosmo_cls, cosmo): """Test method ``.__repr__()``.""" super().test_repr(cosmo_cls, cosmo) expected = ("w0waCDM(name=\"ABCMeta\", H0=70.0 km / (Mpc s), Om0=0.27," " Ode0=0.73, w0=-1.0, wa=-0.5, Tcmb0=3.0 K, Neff=3.04," " m_nu=[0. 0. 0.] 
eV, Ob0=0.03)") assert repr(cosmo) == expected # ----------------------------------------------------------------------------- class TestFlatw0waCDM(FlatFLRWMixinTest, Testw0waCDM): """Test :class:`astropy.cosmology.Flatw0waCDM`.""" def setup_class(self): """Setup for testing.""" super().setup_class(self) self.cls = Flatw0waCDM self.cls_kwargs.update(w0=-1, wa=-0.5) def test_repr(self, cosmo_cls, cosmo): """Test method ``.__repr__()``.""" super().test_repr(cosmo_cls, cosmo) expected = ("Flatw0waCDM(name=\"ABCMeta\", H0=70.0 km / (Mpc s)," " Om0=0.27, w0=-1.0, wa=-0.5, Tcmb0=3.0 K, Neff=3.04," " m_nu=[0. 0. 0.] eV, Ob0=0.03)") assert repr(cosmo) == expected
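# -----------------------------------------------------------------------------
# A quick standalone check (a sketch, not part of the suite) of the CPL
# values hard-coded in ``Testw0waCDM.test_w`` above.  For
# w(z) = w0 + wa * z / (1 + z) with w0 = -1, wa = -0.5:
#     w(1.0) = -1 - 0.5 * (1 / 2) = -1.25.

import numpy as np
from astropy.cosmology import w0waCDM

_cosmo = w0waCDM(H0=70, Om0=0.27, Ode0=0.73, w0=-1, wa=-0.5)
assert np.allclose(_cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]),
                   [-1.0, -1.16666667, -1.25, -1.3, -1.34848485])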
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Testing :mod:`astropy.cosmology.flrw.w0cdm`.""" ############################################################################## # IMPORTS # STDLIB # THIRD PARTY import pytest # LOCAL import astropy.units as u from astropy.cosmology import FlatwCDM, wCDM from astropy.cosmology.parameter import Parameter from astropy.cosmology.tests.test_core import ParameterTestMixin, valid_zs from .test_base import FlatFLRWMixinTest, FLRWSubclassTest ############################################################################## # TESTS ############################################################################## class Parameterw0TestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` w0 on a Cosmology. w0 is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. """ def test_w0(self, cosmo_cls, cosmo): """Test Parameter ``w0``.""" # on the class assert isinstance(cosmo_cls.w0, Parameter) assert "Dark energy equation of state" in cosmo_cls.w0.__doc__ assert cosmo_cls.w0.unit is None # on the instance assert cosmo.w0 is cosmo._w0 assert cosmo.w0 == self.cls_kwargs["w0"] def test_init_w0(self, cosmo_cls, ba): """Test initialization for values of ``w0``.""" # test that it works with units ba.arguments["w0"] = ba.arguments["w0"] << u.one # ensure units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.w0 == ba.arguments["w0"] # also without units ba.arguments["w0"] = ba.arguments["w0"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.w0 == ba.arguments["w0"] # must be dimensionless ba.arguments["w0"] = 10 * u.km with pytest.raises(TypeError): cosmo_cls(*ba.args, **ba.kwargs) class TestwCDM(FLRWSubclassTest, Parameterw0TestMixin): """Test :class:`astropy.cosmology.wCDM`.""" def setup_class(self): """Setup for testing.""" super().setup_class(self) self.cls = wCDM self.cls_kwargs.update(w0=-0.5) # =============================================================== # Method & Attribute Tests def test_clone_change_param(self, cosmo): """Test method ``.clone()`` changing a(many) Parameter(s).""" super().test_clone_change_param(cosmo) # `w` params c = cosmo.clone(w0=0.1) assert c.w0 == 0.1 for n in (set(cosmo.__parameters__) - {"w0"}): v = getattr(c, n) if v is None: assert v is getattr(cosmo, n) else: assert u.allclose(v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1)) @pytest.mark.parametrize("z", valid_zs) def test_w(self, cosmo, z): """Test :meth:`astropy.cosmology.wCDM.w`.""" super().test_w(cosmo, z) w = cosmo.w(z) assert u.allclose(w, self.cls_kwargs["w0"]) def test_repr(self, cosmo_cls, cosmo): """Test method ``.__repr__()``.""" super().test_repr(cosmo_cls, cosmo) expected = ("wCDM(name=\"ABCMeta\", H0=70.0 km / (Mpc s), Om0=0.27," " Ode0=0.73, w0=-0.5, Tcmb0=3.0 K, Neff=3.04," " m_nu=[0. 0. 0.] 
eV, Ob0=0.03)") assert repr(cosmo) == expected # ----------------------------------------------------------------------------- class TestFlatwCDM(FlatFLRWMixinTest, TestwCDM): """Test :class:`astropy.cosmology.FlatwCDM`.""" def setup_class(self): """Setup for testing.""" super().setup_class(self) self.cls = FlatwCDM self.cls_kwargs.update(w0=-0.5) def test_repr(self, cosmo_cls, cosmo): """Test method ``.__repr__()``.""" super().test_repr(cosmo_cls, cosmo) expected = ("FlatwCDM(name=\"ABCMeta\", H0=70.0 km / (Mpc s), Om0=0.27," " w0=-0.5, Tcmb0=3.0 K, Neff=3.04, m_nu=[0. 0. 0.] eV," " Ob0=0.03)") assert repr(cosmo) == expected
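# -----------------------------------------------------------------------------
# A quick standalone check (a sketch, not part of the suite) that wCDM has
# a redshift-independent equation of state, as ``TestwCDM.test_w`` above
# asserts for every valid redshift: w(z) == w0 for all z.

import numpy as np
from astropy.cosmology import wCDM

_cosmo = wCDM(H0=70, Om0=0.27, Ode0=0.73, w0=-0.5)
assert np.allclose(_cosmo.w([0.0, 1.0, 2.0, 10.0]), -0.5)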
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst # # Astropy documentation build configuration file. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this file. # # All configuration values have a default. Some values are defined in # the global Astropy configuration which is loaded here before anything else. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('..')) # IMPORTANT: the above commented section was generated by sphinx-quickstart, but # is *NOT* appropriate for astropy or Astropy affiliated packages. It is left # commented out with this explanation to make it clear why this should not be # done. If the sys.path entry above is added, when the astropy.sphinx.conf # import occurs, it will import the *source* version of astropy instead of the # version installed (if invoked as "make html" or directly with sphinx), or the # version in the build directory. # Thus, any C-extensions that are needed to build the documentation will *not* # be accessible, and the documentation will not build correctly. # See sphinx_astropy.conf for which values are set there. import os import sys import configparser from datetime import datetime from importlib import metadata import doctest from packaging.requirements import Requirement from packaging.specifiers import SpecifierSet # -- Check for missing dependencies ------------------------------------------- missing_requirements = {} for line in metadata.requires('astropy'): if 'extra == "docs"' in line: req = Requirement(line.split(';')[0]) req_package = req.name.lower() req_specifier = str(req.specifier) try: version = metadata.version(req_package) except metadata.PackageNotFoundError: missing_requirements[req_package] = req_specifier if version not in SpecifierSet(req_specifier, prereleases=True): missing_requirements[req_package] = req_specifier if missing_requirements: print('The following packages could not be found and are required to ' 'build the documentation:') for key, val in missing_requirements.items(): print(f' * {key} {val}') print('Please install the "docs" requirements.') sys.exit(1) from sphinx_astropy.conf.v1 import * # noqa # -- Plot configuration ------------------------------------------------------- plot_rcparams = {} plot_rcparams['figure.figsize'] = (6, 6) plot_rcparams['savefig.facecolor'] = 'none' plot_rcparams['savefig.bbox'] = 'tight' plot_rcparams['axes.labelsize'] = 'large' plot_rcparams['figure.subplot.hspace'] = 0.5 plot_apply_rcparams = True plot_html_show_source_link = False plot_formats = ['png', 'svg', 'pdf'] # Don't use the default - which includes a numpy and matplotlib import plot_pre_code = "" # -- General configuration ---------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = '1.7' # To perform a Sphinx version check that needs to be more specific than # major.minor, call `check_sphinx_version("X.Y.Z")` here. check_sphinx_version("1.2.1") # noqa: F405 # The intersphinx_mapping in sphinx_astropy.sphinx refers to astropy for # the benefit of other packages who want to refer to objects in the # astropy core. 
However, we don't want to cyclically reference astropy in its # own build so we remove it here. del intersphinx_mapping['astropy'] # noqa: F405 # add any custom intersphinx for astropy intersphinx_mapping['astropy-dev'] = ('https://docs.astropy.org/en/latest/', None) # noqa: F405 intersphinx_mapping['pyerfa'] = ('https://pyerfa.readthedocs.io/en/stable/', None) # noqa: F405 intersphinx_mapping['pytest'] = ('https://docs.pytest.org/en/stable/', None) # noqa: F405 intersphinx_mapping['ipython'] = ('https://ipython.readthedocs.io/en/stable/', None) # noqa: F405 intersphinx_mapping['pandas'] = ('https://pandas.pydata.org/pandas-docs/stable/', None) # noqa: F405, E501 intersphinx_mapping['sphinx_automodapi'] = ('https://sphinx-automodapi.readthedocs.io/en/stable/', None) # noqa: F405, E501 intersphinx_mapping['packagetemplate'] = ('https://docs.astropy.org/projects/package-template/en/latest/', None) # noqa: F405, E501 intersphinx_mapping['h5py'] = ('https://docs.h5py.org/en/stable/', None) # noqa: F405 intersphinx_mapping['asdf-astropy'] = ('https://asdf-astropy.readthedocs.io/en/latest/', None) # noqa: F405 # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns.append('_templates') # noqa: F405 exclude_patterns.append('changes') # noqa: F405 exclude_patterns.append('_pkgtemplate.rst') # noqa: F405 exclude_patterns.append('**/*.inc.rst') # .inc.rst mean *include* files, don't have sphinx process them # noqa: F405, E501 # Add any paths that contain templates here, relative to this directory. if 'templates_path' not in locals(): # in case parent conf.py defines it templates_path = [] templates_path.append('_templates') extensions += ["sphinx_changelog"] # noqa: F405 # Grab minversion from setup.cfg setup_cfg = configparser.ConfigParser() setup_cfg.read(os.path.join(os.path.pardir, 'setup.cfg')) __minimum_python_version__ = setup_cfg['options']['python_requires'].replace('>=', '') project = u'Astropy' min_versions = {} for line in metadata.requires('astropy'): req = Requirement(line.split(';')[0]) min_versions[req.name.lower()] = str(req.specifier) # This is added to the end of RST files - a good place to put substitutions to # be used globally. with open("common_links.txt", "r") as cl: rst_epilog += cl.read().format(minimum_python=__minimum_python_version__, **min_versions) # Manually register doctest options since matplotlib 3.5 messed up allowing them # from pytest-doctestplus IGNORE_OUTPUT = doctest.register_optionflag('IGNORE_OUTPUT') REMOTE_DATA = doctest.register_optionflag('REMOTE_DATA') FLOAT_CMP = doctest.register_optionflag('FLOAT_CMP') # Whether to create cross-references for the parameter types in the # Parameters, Other Parameters, Returns and Yields sections of the docstring. numpydoc_xref_param_type = True # Words not to cross-reference. Most likely, these are common words used in # parameter type descriptions that may be confused for classes of the same # name. The base set comes from sphinx-astropy. We add more here. numpydoc_xref_ignore.update({ "mixin", "Any", # aka something that would be annotated with `typing.Any` # needed in subclassing numpy # TODO! revisit "Arguments", "Path", # TODO! not need to ignore. "flag", "bits", }) # Mappings to fully qualified paths (or correct ReST references) for the # aliases/shortcuts used when specifying the types of parameters. 
# Numpy provides some defaults # https://github.com/numpy/numpydoc/blob/b352cd7635f2ea7748722f410a31f937d92545cc/numpydoc/xref.py#L62-L94 # and a base set comes from sphinx-astropy. # so here we mostly need to define Astropy-specific x-refs numpydoc_xref_aliases.update({ # python & adjacent "Any": "`~typing.Any`", "file-like": ":term:`python:file-like object`", "file": ":term:`python:file object`", "path-like": ":term:`python:path-like object`", "module": ":term:`python:module`", "buffer-like": ":term:buffer-like", "hashable": ":term:`python:hashable`", # for matplotlib "color": ":term:`color`", # for numpy "ints": ":class:`python:int`", # for astropy "number": ":term:`number`", "Representation": ":class:`~astropy.coordinates.BaseRepresentation`", "writable": ":term:`writable file-like object`", "readable": ":term:`readable file-like object`", "BaseHDU": ":doc:`HDU </io/fits/api/hdus>`" }) # Add from sphinx-astropy 1) glossary aliases 2) physical types. numpydoc_xref_aliases.update(numpydoc_xref_astropy_aliases) # -- Project information ------------------------------------------------------ author = u'The Astropy Developers' copyright = f'2011–{datetime.utcnow().year}, ' + author # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # The full version, including alpha/beta/rc tags. release = metadata.version(project) # The short X.Y version. version = '.'.join(release.split('.')[:2]) # Only include dev docs in dev version. dev = 'dev' in release if not dev: exclude_patterns.append('development/*') # noqa: F405 exclude_patterns.append('testhelpers.rst') # noqa: F405 # -- Options for the module index --------------------------------------------- modindex_common_prefix = ['astropy.'] # -- Options for HTML output --------------------------------------------------- # A NOTE ON HTML THEMES # # The global astropy configuration uses a custom theme, # 'bootstrap-astropy', which is installed along with astropy. The # theme has options for controlling the text of the logo in the upper # left corner. This is how you would specify the options in order to # override the theme defaults (The following options *are* the # defaults, so we do not actually need to set them here.) # html_theme_options = { # 'logotext1': 'astro', # white, semi-bold # 'logotext2': 'py', # orange, light # 'logotext3': ':docs' # white, light # } # A different theme can be used, or other parts of this theme can be # modified, by overriding some of the variables set in the global # configuration. The variables set in the global configuration are # listed below, commented out. # Add any paths that contain custom themes here, relative to this directory. # To use a different custom theme, add the directory containing the theme. # html_theme_path = [] # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. To override the custom theme, set this to the # name of a builtin theme or the name of a custom theme in html_theme_path. # html_theme = None # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = '' # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. 
# html_last_updated_fmt = '' # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". html_title = f'{project} v{release}' # Output file base name for HTML help builder. htmlhelp_basename = project + 'doc' # A dictionary of values to pass into the template engine’s context for all pages. html_context = { 'to_be_indexed': ['stable', 'latest'], 'is_development': dev } # -- Options for LaTeX output -------------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [('index', project + '.tex', project + u' Documentation', author, 'manual')] latex_logo = '_static/astropy_logo.pdf' # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [('index', project.lower(), project + u' Documentation', [author], 1)] # Setting this URL is requited by sphinx-astropy github_issues_url = 'https://github.com/astropy/astropy/issues/' edit_on_github_branch = 'main' # Enable nitpicky mode - which ensures that all references in the docs # resolve. nitpicky = True # This is not used. See docs/nitpick-exceptions file for the actual listing. nitpick_ignore = [] for line in open('nitpick-exceptions'): if line.strip() == "" or line.startswith("#"): continue dtype, target = line.split(None, 1) target = target.strip() nitpick_ignore.append((dtype, target)) # -- Options for the Sphinx gallery ------------------------------------------- try: import warnings import sphinx_gallery # noqa: F401 extensions += ["sphinx_gallery.gen_gallery"] # noqa: F405 sphinx_gallery_conf = { 'backreferences_dir': 'generated/modules', # path to store the module using example template # noqa: E501 'filename_pattern': '^((?!skip_).)*$', # execute all examples except those that start with "skip_" # noqa: E501 'examples_dirs': f'..{os.sep}examples', # path to the examples scripts 'gallery_dirs': 'generated/examples', # path to save gallery generated examples 'reference_url': { 'astropy': None, 'matplotlib': 'https://matplotlib.org/stable/', 'numpy': 'https://numpy.org/doc/stable/', }, 'abort_on_example_error': True } # Filter out backend-related warnings as described in # https://github.com/sphinx-gallery/sphinx-gallery/pull/564 warnings.filterwarnings("ignore", category=UserWarning, message='Matplotlib is currently using agg, which is a' ' non-GUI backend, so cannot show the figure.') except ImportError: sphinx_gallery = None # -- Options for linkcheck output ------------------------------------------- linkcheck_retry = 5 linkcheck_ignore = ['https://journals.aas.org/manuscript-preparation/', 'https://maia.usno.navy.mil/', 'https://www.usno.navy.mil/USNO/time/gps/usno-gps-time-transfer', 'https://aa.usno.navy.mil/publications/docs/Circular_179.php', 'http://data.astropy.org', 'https://doi.org/10.1017/S0251107X00002406', # internal server error 'https://doi.org/10.1017/pasa.2013.31', # internal server error 'https://pyfits.readthedocs.io/en/v3.2.1/', # defunct page in CHANGES.rst r'https://github\.com/astropy/astropy/(?:issues|pull)/\d+'] linkcheck_timeout = 180 linkcheck_anchors = False # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. 
html_extra_path = ['robots.txt'] def rstjinja(app, docname, source): """Render pages as a jinja template to hide/show dev docs. """ # Make sure we're outputting HTML if app.builder.format != 'html': return files_to_render = ["index", "install"] if docname in files_to_render: print(f"Jinja rendering {docname}") rendered = app.builder.templates.render_string( source[0], app.config.html_context) source[0] = rendered def resolve_astropy_and_dev_reference(app, env, node, contnode): """ Reference targets for ``astropy:`` and ``astropy-dev:`` are special cases. Documentation links in astropy can be set up as intersphinx links so that affiliate packages do not have to override the docstrings when building the docs. If we are building the development docs it is a local ref targeting the label ``astropy-dev:<label>``, but for stable docs it should be an intersphinx resolution to the development docs. See https://github.com/astropy/astropy/issues/11366 """ # should the node be processed? reftarget = node.get('reftarget') # str or None if str(reftarget).startswith('astropy:'): # This allows Astropy to use intersphinx links to itself and have # them resolve to local links. Downstream packages will see intersphinx. # TODO! deprecate this if sphinx-doc/sphinx/issues/9169 is implemented. process, replace = True, 'astropy:' elif dev and str(reftarget).startswith('astropy-dev:'): process, replace = True, 'astropy-dev:' else: process, replace = False, '' # make link local if process: reftype = node.get('reftype') refdoc = node.get('refdoc', app.env.docname) # convert astropy intersphinx targets to local links. # there are a few types of intersphinx link patters, as described in # https://docs.readthedocs.io/en/stable/guides/intersphinx.html reftarget = reftarget.replace(replace, '') if reftype == "doc": # also need to replace the doc link node.replace_attr("reftarget", reftarget) # Delegate to the ref node's original domain/target (typically :ref:) try: domain = app.env.domains[node['refdomain']] return domain.resolve_xref(app.env, refdoc, app.builder, reftype, reftarget, node, contnode) except Exception: pass # Otherwise return None which should delegate to intersphinx def setup(app): if sphinx_gallery is None: msg = ('The sphinx_gallery extension is not installed, so the ' 'gallery will not be built. You will probably see ' 'additional warnings about undefined references due ' 'to this.') try: app.warn(msg) except AttributeError: # Sphinx 1.6+ from sphinx.util import logging logger = logging.getLogger(__name__) logger.warning(msg) # Generate the page from Jinja template app.connect("source-read", rstjinja) # Set this to higher priority than intersphinx; this way when building # dev docs astropy-dev: targets will go to the local docs instead of the # intersphinx mapping app.connect("missing-reference", resolve_astropy_and_dev_reference, priority=400)
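# -----------------------------------------------------------------------------
# A minimal standalone sketch of the ``nitpick-exceptions`` parsing done in
# ``conf.py`` above.  The file holds one "<type> <target>" pair per line;
# blank lines and "#" comments are skipped.  The entry below is
# hypothetical, for illustration only.

from io import StringIO

_sample = StringIO("# comments and blank lines are ignored\n"
                   "\n"
                   "py:class some.hypothetical.Target\n")
_nitpick_ignore = []
for _line in _sample:
    if _line.strip() == "" or _line.startswith("#"):
        continue
    _dtype, _target = _line.split(None, 1)
    _nitpick_ignore.append((_dtype, _target.strip()))
assert _nitpick_ignore == [("py:class", "some.hypothetical.Target")]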
import os import shutil import sys import erfa # noqa import matplotlib import pytest import astropy # noqa if len(sys.argv) == 3 and sys.argv[1] == '--astropy-root': ROOT = sys.argv[2] else: # Make sure we don't allow any arguments to be passed - some tests call # sys.executable which becomes this script when producing a pyinstaller # bundle, but we should just error in this case since this is not the # regular Python interpreter. if len(sys.argv) > 1: print("Extra arguments passed, exiting early") sys.exit(1) for root, dirnames, files in os.walk(os.path.join(ROOT, 'astropy')): # NOTE: we can't simply use # test_root = root.replace('astropy', 'astropy_tests') # as we only want to change the one which is for the module, so instead # we search for the last occurrence and replace that. pos = root.rfind('astropy') test_root = root[:pos] + 'astropy_tests' + root[pos + 7:] # Copy over the astropy 'tests' directories and their contents for dirname in dirnames: final_dir = os.path.relpath(os.path.join(test_root, dirname), ROOT) # We only copy over 'tests' directories, but not astropy/tests (only # astropy/tests/tests) since that is not just a directory with tests. if dirname == 'tests' and not root.endswith('astropy'): shutil.copytree(os.path.join(root, dirname), final_dir, dirs_exist_ok=True) else: # Create empty __init__.py files so that 'astropy_tests' still # behaves like a single package, otherwise pytest gets confused # by the different conftest.py files. init_filename = os.path.join(final_dir, '__init__.py') if not os.path.exists(os.path.join(final_dir, '__init__.py')): os.makedirs(final_dir, exist_ok=True) with open(os.path.join(final_dir, '__init__.py'), 'w') as f: f.write("#") # Copy over all conftest.py files for file in files: if file == 'conftest.py': final_file = os.path.relpath(os.path.join(test_root, file), ROOT) shutil.copy2(os.path.join(root, file), final_file) # Add the top-level __init__.py file with open(os.path.join('astropy_tests', '__init__.py'), 'w') as f: f.write("#") # Remove test file that tries to import all sub-packages at collection time os.remove(os.path.join('astropy_tests', 'utils', 'iers', 'tests', 'test_leap_second.py')) # Remove convolution tests for now as there are issues with the loading of the C extension. # FIXME: one way to fix this would be to migrate the convolution C extension away from using # ctypes and using the regular extension mechanism instead. shutil.rmtree(os.path.join('astropy_tests', 'convolution')) os.remove(os.path.join('astropy_tests', 'modeling', 'tests', 'test_convolution.py')) os.remove(os.path.join('astropy_tests', 'modeling', 'tests', 'test_core.py')) os.remove(os.path.join('astropy_tests', 'visualization', 'tests', 'test_lupton_rgb.py')) # FIXME: PIL minversion check does not work os.remove(os.path.join('astropy_tests', 'visualization', 'wcsaxes', 'tests', 'test_misc.py')) os.remove(os.path.join('astropy_tests', 'visualization', 'wcsaxes', 'tests', 'test_wcsapi.py')) # FIXME: The following tests rely on the fully qualified name of classes which # don't seem to be the same. 
os.remove(os.path.join('astropy_tests', 'table', 'mixins', 'tests', 'test_registry.py')) # Copy the top-level conftest.py shutil.copy2(os.path.join(ROOT, 'astropy', 'conftest.py'), os.path.join('astropy_tests', 'conftest.py')) # matplotlib hook in pyinstaller 5.0 and later no longer collects every backend, see # https://github.com/pyinstaller/pyinstaller/issues/6760 matplotlib.use('svg') # We skip a few tests, which are generally ones that rely on explicitly # checking the name of the current module (which ends up starting with # astropy_tests rather than astropy). SKIP_TESTS = ['test_exception_logging_origin', 'test_log', 'test_configitem', 'test_config_noastropy_fallback', 'test_no_home', 'test_path', 'test_rename_path', 'test_data_name_third_party_package', 'test_pkg_finder', 'test_wcsapi_extension', 'test_find_current_module_bundle', 'test_minversion', 'test_imports', 'test_generate_config', 'test_generate_config2', 'test_create_config_file', 'test_download_parallel_fills_cache'] # Run the tests! sys.exit(pytest.main(['astropy_tests', '-k ' + ' and '.join('not ' + test for test in SKIP_TESTS)], plugins=['pytest_astropy.plugin', 'pytest_doctestplus.plugin', 'pytest_openfiles.plugin', 'pytest_remotedata.plugin', 'pytest_astropy_header.display']))
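# -----------------------------------------------------------------------------
# A standalone sketch of the deselection expression assembled above: pytest
# receives a single "-k" argument of the form
# "not <test1> and not <test2> and ...".  The test names here are
# illustrative only.

_skip = ['test_log', 'test_no_home']
_expr = '-k ' + ' and '.join('not ' + _t for _t in _skip)
assert _expr == '-k not test_log and not test_no_home'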
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains simple statistical algorithms that are straightforwardly implemented as a single python function (or family of functions). This module should generally not be used directly. Everything in `__all__` is imported into `astropy.stats`, and hence that package should be used for access. """ import math import numpy as np import astropy.units as u from . import _stats __all__ = ['gaussian_fwhm_to_sigma', 'gaussian_sigma_to_fwhm', 'binom_conf_interval', 'binned_binom_proportion', 'poisson_conf_interval', 'median_absolute_deviation', 'mad_std', 'signal_to_noise_oir_ccd', 'bootstrap', 'kuiper', 'kuiper_two', 'kuiper_false_positive_probability', 'cdf_from_intervals', 'interval_overlap_length', 'histogram_intervals', 'fold_intervals'] __doctest_skip__ = ['binned_binom_proportion'] __doctest_requires__ = {'binom_conf_interval': ['scipy'], 'poisson_conf_interval': ['scipy']} gaussian_sigma_to_fwhm = 2.0 * math.sqrt(2.0 * math.log(2.0)) """ Factor with which to multiply Gaussian 1-sigma standard deviation to convert it to full width at half maximum (FWHM). """ gaussian_fwhm_to_sigma = 1. / gaussian_sigma_to_fwhm """ Factor with which to multiply Gaussian full width at half maximum (FWHM) to convert it to 1-sigma standard deviation. """ # NUMPY_LT_1_18 def _expand_dims(data, axis): """ Expand the shape of an array. Insert a new axis that will appear at the `axis` position in the expanded array shape. This function allows for tuple axis arguments. ``numpy.expand_dims`` currently does not allow that, but it will in numpy v1.18 (https://github.com/numpy/numpy/pull/14051). ``_expand_dims`` can be replaced with ``numpy.expand_dims`` when the minimum support numpy version is v1.18. Parameters ---------- data : array-like Input array. axis : int or tuple of int Position in the expanded axes where the new axis (or axes) is placed. A tuple of axes is now supported. Out of range axes as described above are now forbidden and raise an `AxisError`. Returns ------- result : ndarray View of ``data`` with the number of dimensions increased. """ if isinstance(data, np.matrix): data = np.asarray(data) else: data = np.asanyarray(data) if not isinstance(axis, (tuple, list)): axis = (axis,) out_ndim = len(axis) + data.ndim axis = np.core.numeric.normalize_axis_tuple(axis, out_ndim) shape_it = iter(data.shape) shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)] return data.reshape(shape) def binom_conf_interval(k, n, confidence_level=0.68269, interval='wilson'): r"""Binomial proportion confidence interval given k successes, n trials. Parameters ---------- k : int or numpy.ndarray Number of successes (0 <= ``k`` <= ``n``). n : int or numpy.ndarray Number of trials (``n`` > 0). If both ``k`` and ``n`` are arrays, they must have the same shape. confidence_level : float, optional Desired probability content of interval. Default is 0.68269, corresponding to 1 sigma in a 1-dimensional Gaussian distribution. Confidence level must be in range [0, 1]. interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional Formula used for confidence interval. See notes for details. The ``'wilson'`` and ``'jeffreys'`` intervals generally give similar results, while 'flat' is somewhat different, especially for small values of ``n``. ``'wilson'`` should be somewhat faster than ``'flat'`` or ``'jeffreys'``. The 'wald' interval is generally not recommended. It is provided for comparison purposes. Default is ``'wilson'``. 
Returns ------- conf_interval : ndarray ``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower and upper limits, respectively, for each element in ``k``, ``n``. Notes ----- In situations where a probability of success is not known, it can be estimated from a number of trials (n) and number of observed successes (k). For example, this is done in Monte Carlo experiments designed to estimate a detection efficiency. It is simple to take the sample proportion of successes (k/n) as a reasonable best estimate of the true probability :math:`\epsilon`. However, deriving an accurate confidence interval on :math:`\epsilon` is non-trivial. There are several formulas for this interval (see [1]_). Four intervals are implemented here: **1. The Wilson Interval.** This interval, attributed to Wilson [2]_, is given by .. math:: CI_{\rm Wilson} = \frac{k + \kappa^2/2}{n + \kappa^2} \pm \frac{\kappa n^{1/2}}{n + \kappa^2} ((\hat{\epsilon}(1 - \hat{\epsilon}) + \kappa^2/(4n))^{1/2} where :math:`\hat{\epsilon} = k / n` and :math:`\kappa` is the number of standard deviations corresponding to the desired confidence interval for a *normal* distribution (for example, 1.0 for a confidence interval of 68.269%). For a confidence interval of 100(1 - :math:`\alpha`)%, .. math:: \kappa = \Phi^{-1}(1-\alpha/2) = \sqrt{2}{\rm erf}^{-1}(1-\alpha). **2. The Jeffreys Interval.** This interval is derived by applying Bayes' theorem to the binomial distribution with the noninformative Jeffreys prior [3]_, [4]_. The noninformative Jeffreys prior is the Beta distribution, Beta(1/2, 1/2), which has the density function .. math:: f(\epsilon) = \pi^{-1} \epsilon^{-1/2}(1-\epsilon)^{-1/2}. The justification for this prior is that it is invariant under reparameterizations of the binomial proportion. The posterior density function is also a Beta distribution: Beta(k + 1/2, n - k + 1/2). The interval is then chosen so that it is *equal-tailed*: Each tail (outside the interval) contains :math:`\alpha`/2 of the posterior probability, and the interval itself contains 1 - :math:`\alpha`. This interval must be calculated numerically. Additionally, when k = 0 the lower limit is set to 0 and when k = n the upper limit is set to 1, so that in these cases, there is only one tail containing :math:`\alpha`/2 and the interval itself contains 1 - :math:`\alpha`/2 rather than the nominal 1 - :math:`\alpha`. **3. A Flat prior.** This is similar to the Jeffreys interval, but uses a flat (uniform) prior on the binomial proportion over the range 0 to 1 rather than the reparametrization-invariant Jeffreys prior. The posterior density function is a Beta distribution: Beta(k + 1, n - k + 1). The same comments about the nature of the interval (equal-tailed, etc.) also apply to this option. **4. The Wald Interval.** This interval is given by .. math:: CI_{\rm Wald} = \hat{\epsilon} \pm \kappa \sqrt{\frac{\hat{\epsilon}(1-\hat{\epsilon})}{n}} The Wald interval gives acceptable results in some limiting cases. Particularly, when n is very large, and the true proportion :math:`\epsilon` is not "too close" to 0 or 1. However, as the later is not verifiable when trying to estimate :math:`\epsilon`, this is not very helpful. Its use is not recommended, but it is provided here for comparison purposes due to its prevalence in everyday practical statistics. This function requires ``scipy`` for all interval types. References ---------- .. [1] Brown, Lawrence D.; Cai, T. Tony; DasGupta, Anirban (2001). "Interval Estimation for a Binomial Proportion". 
Statistical Science 16 (2): 101-133. doi:10.1214/ss/1009213286 .. [2] Wilson, E. B. (1927). "Probable inference, the law of succession, and statistical inference". Journal of the American Statistical Association 22: 209-212. .. [3] Jeffreys, Harold (1946). "An Invariant Form for the Prior Probability in Estimation Problems". Proc. R. Soc. Lond.. A 24 186 (1007): 453-461. doi:10.1098/rspa.1946.0056 .. [4] Jeffreys, Harold (1998). Theory of Probability. Oxford University Press, 3rd edition. ISBN 978-0198503682 Examples -------- Integer inputs return an array with shape (2,): >>> binom_conf_interval(4, 5, interval='wilson') # doctest: +FLOAT_CMP array([0.57921724, 0.92078259]) Arrays of arbitrary dimension are supported. The Wilson and Jeffreys intervals give similar results, even for small k, n: >>> binom_conf_interval([1, 2], 5, interval='wilson') # doctest: +FLOAT_CMP array([[0.07921741, 0.21597328], [0.42078276, 0.61736012]]) >>> binom_conf_interval([1, 2,], 5, interval='jeffreys') # doctest: +FLOAT_CMP array([[0.0842525 , 0.21789949], [0.42218001, 0.61753691]]) >>> binom_conf_interval([1, 2], 5, interval='flat') # doctest: +FLOAT_CMP array([[0.12139799, 0.24309021], [0.45401727, 0.61535699]]) In contrast, the Wald interval gives poor results for small k, n. For k = 0 or k = n, the interval always has zero length. >>> binom_conf_interval([1, 2], 5, interval='wald') # doctest: +FLOAT_CMP array([[0.02111437, 0.18091075], [0.37888563, 0.61908925]]) For confidence intervals approaching 1, the Wald interval for 0 < k < n can give intervals that extend outside [0, 1]: >>> binom_conf_interval([1, 2], 5, interval='wald', confidence_level=0.99) # doctest: +FLOAT_CMP array([[-0.26077835, -0.16433593], [ 0.66077835, 0.96433593]]) """ # noqa if confidence_level < 0. or confidence_level > 1.: raise ValueError('confidence_level must be between 0. and 1.') alpha = 1. - confidence_level k = np.asarray(k).astype(int) n = np.asarray(n).astype(int) if (n <= 0).any(): raise ValueError('n must be positive') if (k < 0).any() or (k > n).any(): raise ValueError('k must be in {0, 1, .., n}') if interval == 'wilson' or interval == 'wald': from scipy.special import erfinv kappa = np.sqrt(2.) * min(erfinv(confidence_level), 1.e10) # Avoid overflows. k = k.astype(float) n = n.astype(float) p = k / n if interval == 'wilson': midpoint = (k + kappa ** 2 / 2.) / (n + kappa ** 2) halflength = (kappa * np.sqrt(n)) / (n + kappa ** 2) * \ np.sqrt(p * (1 - p) + kappa ** 2 / (4 * n)) conf_interval = np.array([midpoint - halflength, midpoint + halflength]) # Correct intervals out of range due to floating point errors. conf_interval[conf_interval < 0.] = 0. conf_interval[conf_interval > 1.] = 1. else: midpoint = p halflength = kappa * np.sqrt(p * (1. - p) / n) conf_interval = np.array([midpoint - halflength, midpoint + halflength]) elif interval == 'jeffreys' or interval == 'flat': from scipy.special import betaincinv if interval == 'jeffreys': lowerbound = betaincinv(k + 0.5, n - k + 0.5, 0.5 * alpha) upperbound = betaincinv(k + 0.5, n - k + 0.5, 1. - 0.5 * alpha) else: lowerbound = betaincinv(k + 1, n - k + 1, 0.5 * alpha) upperbound = betaincinv(k + 1, n - k + 1, 1. - 0.5 * alpha) # Set lower or upper bound to k/n when k/n = 0 or 1 # We have to treat the special case of k/n being scalars, # which is an ugly kludge if lowerbound.ndim == 0: if k == 0: lowerbound = 0. elif k == n: upperbound = 1. 
else: lowerbound[k == 0] = 0 upperbound[k == n] = 1 conf_interval = np.array([lowerbound, upperbound]) else: raise ValueError(f'Unrecognized interval: {interval:s}') return conf_interval def binned_binom_proportion(x, success, bins=10, range=None, confidence_level=0.68269, interval='wilson'): """Binomial proportion and confidence interval in bins of a continuous variable ``x``. Given a set of datapoint pairs where the ``x`` values are continuously distributed and the ``success`` values are binomial ("success / failure" or "true / false"), place the pairs into bins according to ``x`` value and calculate the binomial proportion (fraction of successes) and confidence interval in each bin. Parameters ---------- x : sequence Values. success : sequence of bool Success (`True`) or failure (`False`) corresponding to each value in ``x``. Must be same length as ``x``. bins : int or sequence of scalar, optional If bins is an int, it defines the number of equal-width bins in the given range (10, by default). If bins is a sequence, it defines the bin edges, including the rightmost edge, allowing for non-uniform bin widths (in this case, 'range' is ignored). range : (float, float), optional The lower and upper range of the bins. If `None` (default), the range is set to ``(x.min(), x.max())``. Values outside the range are ignored. confidence_level : float, optional Must be in range [0, 1]. Desired probability content in the confidence interval ``(p - perr[0], p + perr[1])`` in each bin. Default is 0.68269. interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional Formula used to calculate confidence interval on the binomial proportion in each bin. See `binom_conf_interval` for definition of the intervals. The 'wilson', 'jeffreys', and 'flat' intervals generally give similar results. 'wilson' should be somewhat faster, while 'jeffreys' and 'flat' are marginally superior, but differ in the assumed prior. The 'wald' interval is generally not recommended. It is provided for comparison purposes. Default is 'wilson'. Returns ------- bin_ctr : ndarray Central value of bins. Bins without any entries are not returned. bin_halfwidth : ndarray Half-width of each bin such that ``bin_ctr - bin_halfwidth`` and ``bin_ctr + bins_halfwidth`` give the left and right side of each bin, respectively. p : ndarray Efficiency in each bin. perr : ndarray 2-d array of shape (2, len(p)) representing the upper and lower uncertainty on p in each bin. Notes ----- This function requires ``scipy`` for all interval types. See Also -------- binom_conf_interval : Function used to estimate confidence interval in each bin. Examples -------- Suppose we wish to estimate the efficiency of a survey in detecting astronomical sources as a function of magnitude (i.e., the probability of detecting a source given its magnitude). In a realistic case, we might prepare a large number of sources with randomly selected magnitudes, inject them into simulated images, and then record which were detected at the end of the reduction pipeline. As a toy example, we generate 100 data points with randomly selected magnitudes between 20 and 30 and "observe" them with a known detection function (here, the error function, with 50% detection probability at magnitude 25): >>> from scipy.special import erf >>> from scipy.stats.distributions import binom >>> def true_efficiency(x): ... return 0.5 - 0.5 * erf((x - 25.) / 2.) >>> mag = 20. + 10. 
* np.random.rand(100) >>> detected = binom.rvs(1, true_efficiency(mag)) >>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20) >>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o', ... label='estimate') .. plot:: import numpy as np from scipy.special import erf from scipy.stats.distributions import binom import matplotlib.pyplot as plt from astropy.stats import binned_binom_proportion def true_efficiency(x): return 0.5 - 0.5 * erf((x - 25.) / 2.) np.random.seed(400) mag = 20. + 10. * np.random.rand(100) np.random.seed(600) detected = binom.rvs(1, true_efficiency(mag)) bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20) plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o', label='estimate') X = np.linspace(20., 30., 1000) plt.plot(X, true_efficiency(X), label='true efficiency') plt.ylim(0., 1.) plt.title('Detection efficiency vs magnitude') plt.xlabel('Magnitude') plt.ylabel('Detection efficiency') plt.legend() plt.show() The above example uses the Wilson confidence interval to calculate the uncertainty ``perr`` in each bin (see the definition of various confidence intervals in `binom_conf_interval`). A commonly used alternative is the Wald interval. However, the Wald interval can give nonsensical uncertainties when the efficiency is near 0 or 1, and is therefore **not** recommended. As an illustration, the following example shows the same data as above but uses the Wald interval rather than the Wilson interval to calculate ``perr``: >>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20, ... interval='wald') >>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o', ... label='estimate') .. plot:: import numpy as np from scipy.special import erf from scipy.stats.distributions import binom import matplotlib.pyplot as plt from astropy.stats import binned_binom_proportion def true_efficiency(x): return 0.5 - 0.5 * erf((x - 25.) / 2.) np.random.seed(400) mag = 20. + 10. * np.random.rand(100) np.random.seed(600) detected = binom.rvs(1, true_efficiency(mag)) bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20, interval='wald') plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o', label='estimate') X = np.linspace(20., 30., 1000) plt.plot(X, true_efficiency(X), label='true efficiency') plt.ylim(0., 1.) plt.title('The Wald interval can give nonsensical uncertainties') plt.xlabel('Magnitude') plt.ylabel('Detection efficiency') plt.legend() plt.show() """ x = np.ravel(x) success = np.ravel(success).astype(bool) if x.shape != success.shape: raise ValueError('sizes of x and success must match') # Put values into a histogram (`n`). Put "successful" values # into a second histogram (`k`) with identical binning. n, bin_edges = np.histogram(x, bins=bins, range=range) k, bin_edges = np.histogram(x[success], bins=bin_edges) bin_ctr = (bin_edges[:-1] + bin_edges[1:]) / 2. bin_halfwidth = bin_ctr - bin_edges[:-1] # Remove bins with zero entries. 
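    # (A bin with n == 0 would give 0/0 in the efficiency p = k / n and
    # carries no information, so such bins are dropped from every returned
    # array, as documented in the Returns section above.)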
valid = n > 0 bin_ctr = bin_ctr[valid] bin_halfwidth = bin_halfwidth[valid] n = n[valid] k = k[valid] p = k / n bounds = binom_conf_interval(k, n, confidence_level=confidence_level, interval=interval) perr = np.abs(bounds - p) return bin_ctr, bin_halfwidth, p, perr def _check_poisson_conf_inputs(sigma, background, confidence_level, name): if sigma != 1: raise ValueError(f"Only sigma=1 supported for interval {name}") if background != 0: raise ValueError(f"background not supported for interval {name}") if confidence_level is not None: raise ValueError(f"confidence_level not supported for interval {name}") def poisson_conf_interval(n, interval='root-n', sigma=1, background=0, confidence_level=None): r"""Poisson parameter confidence interval given observed counts Parameters ---------- n : int or numpy.ndarray Number of counts (0 <= ``n``). interval : {'root-n','root-n-0','pearson','sherpagehrels','frequentist-confidence', 'kraft-burrows-nousek'}, optional Formula used for confidence interval. See notes for details. Default is ``'root-n'``. sigma : float, optional Number of sigma for confidence interval; only supported for the 'frequentist-confidence' mode. background : float, optional Number of counts expected from the background; only supported for the 'kraft-burrows-nousek' mode. This number is assumed to be determined from a large region so that the uncertainty on its value is negligible. confidence_level : float, optional Confidence level between 0 and 1; only supported for the 'kraft-burrows-nousek' mode. Returns ------- conf_interval : ndarray ``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower and upper limits, respectively, for each element in ``n``. Notes ----- The "right" confidence interval to use for Poisson data is a matter of debate. The CDF working group `recommends <https://web.archive.org/web/20210222093249/https://www-cdf.fnal.gov/physics/statistics/notes/pois_eb.txt>`_ using root-n throughout, largely in the interest of comprehensibility, but discusses other possibilities. The ATLAS group also discusses several possibilities but concludes that no single representation is suitable for all cases. The suggestion has also been `floated <https://ui.adsabs.harvard.edu/abs/2012EPJP..127...24A>`_ that error bars should be attached to theoretical predictions instead of observed data, which this function will not help with (but it's easy; then you really should use the square root of the theoretical prediction). The intervals implemented here are: **1. 'root-n'** This is a very widely used standard rule derived from the maximum-likelihood estimator for the mean of the Poisson process. While it produces questionable results for small n and outright wrong results for n=0, it is standard enough that people are (supposedly) used to interpreting these wonky values. The interval is .. math:: CI = (n-\sqrt{n}, n+\sqrt{n}) **2. 'root-n-0'** This is identical to the above except that where n is zero the interval returned is (0,1). **3. 'pearson'** This is an only-slightly-more-complicated rule based on Pearson's chi-squared rule (as `explained <https://web.archive.org/web/20210222093249/https://www-cdf.fnal.gov/physics/statistics/notes/pois_eb.txt>`_ by the CDF working group). It also has the nice feature that if your theory curve touches an endpoint of the interval, then your data point is indeed one sigma away. The interval is .. math:: CI = (n+0.5-\sqrt{n+0.25}, n+0.5+\sqrt{n+0.25}) **4. 'sherpagehrels'** This rule is used by default in the fitting package 'sherpa'. 
The `documentation <https://cxc.harvard.edu/sherpa4.4/statistics/#chigehrels>`_ claims it is based on a numerical approximation published in `Gehrels (1986) <https://ui.adsabs.harvard.edu/abs/1986ApJ...303..336G>`_ but it does not actually appear there. It is symmetrical, and while the upper limits are within about 1% of those given by 'frequentist-confidence', the lower limits can be badly wrong. The interval is .. math:: CI = (n-1-\sqrt{n+0.75}, n+1+\sqrt{n+0.75}) **5. 'frequentist-confidence'** These are frequentist central confidence intervals: .. math:: CI = (0.5 F_{\chi^2}^{-1}(\alpha;2n), 0.5 F_{\chi^2}^{-1}(1-\alpha;2(n+1))) where :math:`F_{\chi^2}^{-1}` is the quantile of the chi-square distribution with the indicated number of degrees of freedom and :math:`\alpha` is the one-tailed probability of the normal distribution (at the point given by the parameter 'sigma'). See `Maxwell (2011) <https://ui.adsabs.harvard.edu/abs/2011arXiv1102.0822M>`_ for further details. **6. 'kraft-burrows-nousek'** This is a Bayesian approach which allows for the presence of a known background :math:`B` in the source signal :math:`N`. For a given confidence level :math:`CL` the confidence interval :math:`[S_\mathrm{min}, S_\mathrm{max}]` is given by: .. math:: CL = \int^{S_\mathrm{max}}_{S_\mathrm{min}} f_{N,B}(S)dS where the function :math:`f_{N,B}` is: .. math:: f_{N,B}(S) = C \frac{e^{-(S+B)}(S+B)^N}{N!} and the normalization constant :math:`C`: .. math:: C = \left[ \int_0^\infty \frac{e^{-(S+B)}(S+B)^N}{N!} dS \right] ^{-1} = \left( \sum^N_{n=0} \frac{e^{-B}B^n}{n!} \right)^{-1} See `Kraft, Burrows, and Nousek (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_ for further details. These formulas implement a positive, uniform prior. `Kraft, Burrows, and Nousek (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_ discuss this choice in more detail and show that the problem is relatively insensitive to the choice of prior. This function has an optional dependency: Either `Scipy <https://www.scipy.org/>`_ or `mpmath <http://mpmath.org/>`_ need to be available (Scipy works only for N < 100). This code is very intense numerically, which makes it much slower than the other methods, in particular for large count numbers (above 1000 even with ``mpmath``). Fortunately, some of the other methods or a Gaussian approximation usually work well in this regime. Examples -------- >>> poisson_conf_interval(np.arange(10), interval='root-n').T array([[ 0. , 0. ], [ 0. , 2. ], [ 0.58578644, 3.41421356], [ 1.26794919, 4.73205081], [ 2. , 6. ], [ 2.76393202, 7.23606798], [ 3.55051026, 8.44948974], [ 4.35424869, 9.64575131], [ 5.17157288, 10.82842712], [ 6. , 12. ]]) >>> poisson_conf_interval(np.arange(10), interval='root-n-0').T array([[ 0. , 1. ], [ 0. , 2. ], [ 0.58578644, 3.41421356], [ 1.26794919, 4.73205081], [ 2. , 6. ], [ 2.76393202, 7.23606798], [ 3.55051026, 8.44948974], [ 4.35424869, 9.64575131], [ 5.17157288, 10.82842712], [ 6. , 12. ]]) >>> poisson_conf_interval(np.arange(10), interval='pearson').T array([[ 0. , 1. ], [ 0.38196601, 2.61803399], [ 1. , 4. ], [ 1.69722436, 5.30277564], [ 2.43844719, 6.56155281], [ 3.20871215, 7.79128785], [ 4. , 9. ], [ 4.8074176 , 10.1925824 ], [ 5.62771868, 11.37228132], [ 6.45861873, 12.54138127]]) >>> poisson_conf_interval( ... np.arange(10), interval='frequentist-confidence').T array([[ 0. 
, 1.84102165], [ 0.17275378, 3.29952656], [ 0.70818544, 4.63785962], [ 1.36729531, 5.91818583], [ 2.08566081, 7.16275317], [ 2.84030886, 8.38247265], [ 3.62006862, 9.58364155], [ 4.41852954, 10.77028072], [ 5.23161394, 11.94514152], [ 6.05653896, 13.11020414]]) >>> poisson_conf_interval( ... 7, interval='frequentist-confidence').T array([ 4.41852954, 10.77028072]) >>> poisson_conf_interval( ... 10, background=1.5, confidence_level=0.95, ... interval='kraft-burrows-nousek').T # doctest: +FLOAT_CMP array([[ 3.47894005, 16.113329533]]) """ # noqa if not np.isscalar(n): n = np.asanyarray(n) if interval == 'root-n': _check_poisson_conf_inputs(sigma, background, confidence_level, interval) conf_interval = np.array([n - np.sqrt(n), n + np.sqrt(n)]) elif interval == 'root-n-0': _check_poisson_conf_inputs(sigma, background, confidence_level, interval) conf_interval = np.array([n - np.sqrt(n), n + np.sqrt(n)]) if np.isscalar(n): if n == 0: conf_interval[1] = 1 else: conf_interval[1, n == 0] = 1 elif interval == 'pearson': _check_poisson_conf_inputs(sigma, background, confidence_level, interval) conf_interval = np.array([n + 0.5 - np.sqrt(n + 0.25), n + 0.5 + np.sqrt(n + 0.25)]) elif interval == 'sherpagehrels': _check_poisson_conf_inputs(sigma, background, confidence_level, interval) conf_interval = np.array([n - 1 - np.sqrt(n + 0.75), n + 1 + np.sqrt(n + 0.75)]) elif interval == 'frequentist-confidence': _check_poisson_conf_inputs(1., background, confidence_level, interval) import scipy.stats alpha = scipy.stats.norm.sf(sigma) conf_interval = np.array([0.5 * scipy.stats.chi2(2 * n).ppf(alpha), 0.5 * scipy.stats.chi2(2 * n + 2).isf(alpha)]) if np.isscalar(n): if n == 0: conf_interval[0] = 0 else: conf_interval[0, n == 0] = 0 elif interval == 'kraft-burrows-nousek': # Deprecation warning in Python 3.9 when N is float, so we force int, # see https://github.com/astropy/astropy/issues/10832 if np.isscalar(n): if not isinstance(n, int): raise TypeError('Number of counts must be integer.') elif not issubclass(n.dtype.type, np.integer): raise TypeError('Number of counts must be integer.') if confidence_level is None: raise ValueError('Set confidence_level for method {}. (sigma is ' 'ignored.)'.format(interval)) confidence_level = np.asanyarray(confidence_level) if np.any(confidence_level <= 0) or np.any(confidence_level >= 1): raise ValueError('confidence_level must be a number between 0 and 1.') background = np.asanyarray(background) if np.any(background < 0): raise ValueError('Background must be >= 0.') conf_interval = np.vectorize(_kraft_burrows_nousek, cache=True)(n, background, confidence_level) conf_interval = np.vstack(conf_interval) else: raise ValueError(f"Invalid method for Poisson confidence intervals: {interval}") return conf_interval def median_absolute_deviation(data, axis=None, func=None, ignore_nan=False): """ Calculate the median absolute deviation (MAD). The MAD is defined as ``median(abs(a - median(a)))``. Parameters ---------- data : array-like Input array or object that can be converted to an array. axis : None, int, or tuple of int, optional The axis or axes along which the MADs are computed. The default (`None`) is to compute the MAD of the flattened array. func : callable, optional The function used to compute the median. Defaults to `numpy.ma.median` for masked arrays, otherwise to `numpy.median`. ignore_nan : bool Ignore NaN values (treat them as if they are not in the array) when computing the median. 
This will use `numpy.ma.median` if ``axis`` is specified, or `numpy.nanmedian` if ``axis==None`` and numpy's version is >1.10 because nanmedian is slightly faster in this case. Returns ------- mad : float or `~numpy.ndarray` The median absolute deviation of the input array. If ``axis`` is `None` then a scalar will be returned, otherwise a `~numpy.ndarray` will be returned. Examples -------- Generate random variates from a Gaussian distribution and return the median absolute deviation for that distribution:: >>> import numpy as np >>> from astropy.stats import median_absolute_deviation >>> rand = np.random.default_rng(12345) >>> from numpy.random import randn >>> mad = median_absolute_deviation(rand.standard_normal(1000)) >>> print(mad) # doctest: +FLOAT_CMP 0.6829504282771885 See Also -------- mad_std """ if func is None: # Check if the array has a mask and if so use np.ma.median # See https://github.com/numpy/numpy/issues/7330 why using np.ma.median # for normal arrays should not be done (summary: np.ma.median always # returns an masked array even if the result should be scalar). (#4658) if isinstance(data, np.ma.MaskedArray): is_masked = True func = np.ma.median if ignore_nan: data = np.ma.masked_where(np.isnan(data), data, copy=True) elif ignore_nan: is_masked = False func = np.nanmedian else: is_masked = False func = np.median # drops units if result is NaN else: is_masked = None data = np.asanyarray(data) # np.nanmedian has `keepdims`, which is a good option if we're not allowing # user-passed functions here data_median = func(data, axis=axis) # this conditional can be removed after this PR is merged: # https://github.com/astropy/astropy/issues/12165 if (isinstance(data, u.Quantity) and func is np.median and data_median.ndim == 0 and np.isnan(data_median)): data_median = data.__array_wrap__(data_median) # broadcast the median array before subtraction if axis is not None: data_median = _expand_dims(data_median, axis=axis) # NUMPY_LT_1_18 result = func(np.abs(data - data_median), axis=axis, overwrite_input=True) # this conditional can be removed after this PR is merged: # https://github.com/astropy/astropy/issues/12165 if (isinstance(data, u.Quantity) and func is np.median and result.ndim == 0 and np.isnan(result)): result = data.__array_wrap__(result) if axis is None and np.ma.isMaskedArray(result): # return scalar version result = result.item() elif np.ma.isMaskedArray(result) and not is_masked: # if the input array was not a masked array, we don't want to return a # masked array result = result.filled(fill_value=np.nan) return result def mad_std(data, axis=None, func=None, ignore_nan=False): r""" Calculate a robust standard deviation using the `median absolute deviation (MAD) <https://en.wikipedia.org/wiki/Median_absolute_deviation>`_. The standard deviation estimator is given by: .. math:: \sigma \approx \frac{\textrm{MAD}}{\Phi^{-1}(3/4)} \approx 1.4826 \ \textrm{MAD} where :math:`\Phi^{-1}(P)` is the normal inverse cumulative distribution function evaluated at probability :math:`P = 3/4`. Parameters ---------- data : array-like Data array or object that can be converted to an array. axis : None, int, or tuple of int, optional The axis or axes along which the robust standard deviations are computed. The default (`None`) is to compute the robust standard deviation of the flattened array. func : callable, optional The function used to compute the median. Defaults to `numpy.ma.median` for masked arrays, otherwise to `numpy.median`. 
ignore_nan : bool Ignore NaN values (treat them as if they are not in the array) when computing the median. This will use `numpy.ma.median` if ``axis`` is specified, or `numpy.nanmedian` if ``axis=None`` and numpy's version is >1.10 because nanmedian is slightly faster in this case. Returns ------- mad_std : float or `~numpy.ndarray` The robust standard deviation of the input data. If ``axis`` is `None` then a scalar will be returned, otherwise a `~numpy.ndarray` will be returned. Examples -------- >>> import numpy as np >>> from astropy.stats import mad_std >>> rand = np.random.default_rng(12345) >>> madstd = mad_std(rand.normal(5, 2, (100, 100))) >>> print(madstd) # doctest: +FLOAT_CMP 1.984147963351707 See Also -------- biweight_midvariance, biweight_midcovariance, median_absolute_deviation """ # NOTE: 1. / scipy.stats.norm.ppf(0.75) = 1.482602218505602 MAD = median_absolute_deviation( data, axis=axis, func=func, ignore_nan=ignore_nan) return MAD * 1.482602218505602 def signal_to_noise_oir_ccd(t, source_eps, sky_eps, dark_eps, rd, npix, gain=1.0): """Computes the signal to noise ratio for source being observed in the optical/IR using a CCD. Parameters ---------- t : float or numpy.ndarray CCD integration time in seconds source_eps : float Number of electrons (photons) or DN per second in the aperture from the source. Note that this should already have been scaled by the filter transmission and the quantum efficiency of the CCD. If the input is in DN, then be sure to set the gain to the proper value for the CCD. If the input is in electrons per second, then keep the gain as its default of 1.0. sky_eps : float Number of electrons (photons) or DN per second per pixel from the sky background. Should already be scaled by filter transmission and QE. This must be in the same units as source_eps for the calculation to make sense. dark_eps : float Number of thermal electrons per second per pixel. If this is given in DN or ADU, then multiply by the gain to get the value in electrons. rd : float Read noise of the CCD in electrons. If this is given in DN or ADU, then multiply by the gain to get the value in electrons. npix : float Size of the aperture in pixels gain : float, optional Gain of the CCD. In units of electrons per DN. Returns ------- SNR : float or numpy.ndarray Signal to noise ratio calculated from the inputs """ signal = t * source_eps * gain noise = np.sqrt(t * (source_eps * gain + npix * (sky_eps * gain + dark_eps)) + npix * rd ** 2) return signal / noise def bootstrap(data, bootnum=100, samples=None, bootfunc=None): """Performs bootstrap resampling on numpy arrays. Bootstrap resampling is used to understand confidence intervals of sample estimates. This function returns versions of the dataset resampled with replacement ("case bootstrapping"). These can all be run through a function or statistic to produce a distribution of values which can then be used to find the confidence intervals. Parameters ---------- data : ndarray N-D array. The bootstrap resampling will be performed on the first index, so the first index should access the relevant information to be bootstrapped. bootnum : int, optional Number of bootstrap resamples samples : int, optional Number of samples in each resample. The default `None` sets samples to the number of datapoints bootfunc : function, optional Function to reduce the resampled data. Each bootstrap resample will be put through this function and the results returned. 
If `None`, the bootstrapped data will be returned Returns ------- boot : ndarray If bootfunc is None, then each row is a bootstrap resample of the data. If bootfunc is specified, then the columns will correspond to the outputs of bootfunc. Examples -------- Obtain a twice resampled array: >>> from astropy.stats import bootstrap >>> import numpy as np >>> from astropy.utils import NumpyRNGContext >>> bootarr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) >>> with NumpyRNGContext(1): ... bootresult = bootstrap(bootarr, 2) ... >>> bootresult # doctest: +FLOAT_CMP array([[6., 9., 0., 6., 1., 1., 2., 8., 7., 0.], [3., 5., 6., 3., 5., 3., 5., 8., 8., 0.]]) >>> bootresult.shape (2, 10) Obtain a statistic on the array >>> with NumpyRNGContext(1): ... bootresult = bootstrap(bootarr, 2, bootfunc=np.mean) ... >>> bootresult # doctest: +FLOAT_CMP array([4. , 4.6]) Obtain a statistic with two outputs on the array >>> test_statistic = lambda x: (np.sum(x), np.mean(x)) >>> with NumpyRNGContext(1): ... bootresult = bootstrap(bootarr, 3, bootfunc=test_statistic) >>> bootresult # doctest: +FLOAT_CMP array([[40. , 4. ], [46. , 4.6], [35. , 3.5]]) >>> bootresult.shape (3, 2) Obtain a statistic with two outputs on the array, keeping only the first output >>> bootfunc = lambda x:test_statistic(x)[0] >>> with NumpyRNGContext(1): ... bootresult = bootstrap(bootarr, 3, bootfunc=bootfunc) ... >>> bootresult # doctest: +FLOAT_CMP array([40., 46., 35.]) >>> bootresult.shape (3,) """ if samples is None: samples = data.shape[0] # make sure the input is sane if samples < 1 or bootnum < 1: raise ValueError("neither 'samples' nor 'bootnum' can be less than 1.") if bootfunc is None: resultdims = (bootnum,) + (samples,) + data.shape[1:] else: # test number of outputs from bootfunc, avoid single outputs which are # array-like try: resultdims = (bootnum, len(bootfunc(data))) except TypeError: resultdims = (bootnum,) # create empty boot array boot = np.empty(resultdims) for i in range(bootnum): bootarr = np.random.randint(low=0, high=data.shape[0], size=samples) if bootfunc is None: boot[i] = data[bootarr] else: boot[i] = bootfunc(data[bootarr]) return boot def _scipy_kraft_burrows_nousek(N, B, CL): '''Upper limit on a poisson count rate The implementation is based on Kraft, Burrows and Nousek `ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_. The XMM-Newton upper limit server uses the same formalism. Parameters ---------- N : int or np.int32/np.int64 Total observed count number B : float or np.float32/np.float64 Background count rate (assumed to be known with negligible error from a large background area). CL : float or np.float32/np.float64 Confidence level (number between 0 and 1) Returns ------- S : source count limit Notes ----- Requires :mod:`~scipy`. This implementation will cause Overflow Errors for about N > 100 (the exact limit depends on details of how scipy was compiled). See `~astropy.stats.mpmath_poisson_upper_limit` for an implementation that is slower, but can deal with arbitrarily high numbers since it is based on the `mpmath <http://mpmath.org/>`_ library. ''' from scipy.optimize import brentq from scipy.integrate import quad from scipy.special import factorial from math import exp def eqn8(N, B): n = np.arange(N + 1, dtype=np.float64) return 1. / (exp(-B) * np.sum(np.power(B, n) / factorial(n))) # The parameters of eqn8 do not vary between calls so we can calculate the # result once and reuse it. The same is True for the factorial of N. 
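    # (For reference: eqn8(N, B) = 1 / (exp(-B) * sum_{n=0..N} B**n / n!),
    # eq. 8 of Kraft, Burrows & Nousek 1991, so it depends only on N and B
    # and is safe to compute once here.)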
# eqn7 is called hundred times so "caching" these values yields a # significant speedup (factor 10). eqn8_res = eqn8(N, B) factorial_N = float(math.factorial(N)) def eqn7(S, N, B): SpB = S + B return eqn8_res * (exp(-SpB) * SpB**N / factorial_N) def eqn9_left(S_min, S_max, N, B): return quad(eqn7, S_min, S_max, args=(N, B), limit=500) def find_s_min(S_max, N, B): ''' Kraft, Burrows and Nousek suggest to integrate from N-B in both directions at once, so that S_min and S_max move similarly (see the article for details). Here, this is implemented differently: Treat S_max as the optimization parameters in func and then calculate the matching s_min that has has eqn7(S_max) = eqn7(S_min) here. ''' y_S_max = eqn7(S_max, N, B) if eqn7(0, N, B) >= y_S_max: return 0. else: return brentq(lambda x: eqn7(x, N, B) - y_S_max, 0, N - B) def func(s): s_min = find_s_min(s, N, B) out = eqn9_left(s_min, s, N, B) return out[0] - CL S_max = brentq(func, N - B, 100) S_min = find_s_min(S_max, N, B) return S_min, S_max def _mpmath_kraft_burrows_nousek(N, B, CL): '''Upper limit on a poisson count rate The implementation is based on Kraft, Burrows and Nousek in `ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_. The XMM-Newton upper limit server used the same formalism. Parameters ---------- N : int or np.int32/np.int64 Total observed count number B : float or np.float32/np.float64 Background count rate (assumed to be known with negligible error from a large background area). CL : float or np.float32/np.float64 Confidence level (number between 0 and 1) Returns ------- S : source count limit Notes ----- Requires the `mpmath <http://mpmath.org/>`_ library. See `~astropy.stats.scipy_poisson_upper_limit` for an implementation that is based on scipy and evaluates faster, but runs only to about N = 100. ''' from mpmath import mpf, factorial, findroot, fsum, power, exp, quad # We convert these values to float. Because for some reason, # mpmath.mpf cannot convert from numpy.int64 N = mpf(float(N)) B = mpf(float(B)) CL = mpf(float(CL)) tol = 1e-4 def eqn8(N, B): sumterms = [power(B, n) / factorial(n) for n in range(int(N) + 1)] return 1. / (exp(-B) * fsum(sumterms)) eqn8_res = eqn8(N, B) factorial_N = factorial(N) def eqn7(S, N, B): SpB = S + B return eqn8_res * (exp(-SpB) * SpB**N / factorial_N) def eqn9_left(S_min, S_max, N, B): def eqn7NB(S): return eqn7(S, N, B) return quad(eqn7NB, [S_min, S_max]) def find_s_min(S_max, N, B): ''' Kraft, Burrows and Nousek suggest to integrate from N-B in both directions at once, so that S_min and S_max move similarly (see the article for details). Here, this is implemented differently: Treat S_max as the optimization parameters in func and then calculate the matching s_min that has has eqn7(S_max) = eqn7(S_min) here. ''' y_S_max = eqn7(S_max, N, B) # If B > N, then N-B, the "most probable" values is < 0 # and thus s_min is certainly 0. # Note: For small N, s_max is also close to 0 and root finding # might find the wrong root, thus it is important to handle this # case here and return the analytical answer (s_min = 0). if (B >= N) or (eqn7(0, N, B) >= y_S_max): return 0. else: def eqn7ysmax(x): return eqn7(x, N, B) - y_S_max return findroot(eqn7ysmax, [0., N - B], solver='ridder', tol=tol) def func(s): s_min = find_s_min(s, N, B) out = eqn9_left(s_min, s, N, B) return out - CL # Several numerical problems were found prevent the solvers from finding # the roots unless the starting values are very close to the final values. 
# Thus, this primitive, time-wasting, brute-force stepping here to get # an interval that can be fed into the ridder solver. s_max_guess = max(N - B, 1.) while func(s_max_guess) < 0: s_max_guess += 1 S_max = findroot(func, [s_max_guess - 1, s_max_guess], solver='ridder', tol=tol) S_min = find_s_min(S_max, N, B) return float(S_min), float(S_max) def _kraft_burrows_nousek(N, B, CL): '''Upper limit on a poisson count rate The implementation is based on Kraft, Burrows and Nousek in `ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_. The XMM-Newton upper limit server used the same formalism. Parameters ---------- N : int or np.int32/np.int64 Total observed count number B : float or np.float32/np.float64 Background count rate (assumed to be known with negligible error from a large background area). CL : float or np.float32/np.float64 Confidence level (number between 0 and 1) Returns ------- S : source count limit Notes ----- This functions has an optional dependency: Either :mod:`scipy` or `mpmath <http://mpmath.org/>`_ need to be available. (Scipy only works for N < 100). ''' from astropy.utils.compat.optional_deps import HAS_SCIPY, HAS_MPMATH if HAS_SCIPY and N <= 100: try: return _scipy_kraft_burrows_nousek(N, B, CL) except OverflowError: if not HAS_MPMATH: raise ValueError('Need mpmath package for input numbers this ' 'large.') if HAS_MPMATH: return _mpmath_kraft_burrows_nousek(N, B, CL) raise ImportError('Either scipy or mpmath are required.') def kuiper_false_positive_probability(D, N): """Compute the false positive probability for the Kuiper statistic. Uses the set of four formulas described in Paltani 2004; they report the resulting function never underestimates the false positive probability but can be a bit high in the N=40..50 range. (They quote a factor 1.5 at the 1e-7 level.) Parameters ---------- D : float The Kuiper test score. N : float The effective sample size. Returns ------- fpp : float The probability of a score this large arising from the null hypothesis. Notes ----- Eq 7 of Paltani 2004 appears to incorrectly quote the original formula (Stephens 1965). This function implements the original formula, as it produces a result closer to Monte Carlo simulations. References ---------- .. [1] Paltani, S., "Searching for periods in X-ray observations using Kuiper's test. Application to the ROSAT PSPC archive", Astronomy and Astrophysics, v.240, p.789-790, 2004. .. [2] Stephens, M. A., "The goodness-of-fit statistic VN: distribution and significance points", Biometrika, v.52, p.309, 1965. """ try: from scipy.special import factorial, comb except ImportError: # Retained for backwards compatibility with older versions of scipy # (factorial appears to have moved here in 0.14) from scipy.misc import factorial, comb if D < 0. or D > 2.: raise ValueError("Must have 0<=D<=2 by definition of the Kuiper test") if D < 2. / N: return 1. - factorial(N) * (D - 1. / N)**(N - 1) elif D < 3. / N: k = -(N * D - 1.) / 2. r = np.sqrt(k**2 - (N * D - 2.)**2 / 2.) a, b = -k + r, -k - r return 1 - (factorial(N - 1) * (b**(N - 1) * (1 - a) - a**(N - 1) * (1 - b)) / N**(N - 2) / (b - a)) elif (D > 0.5 and N % 2 == 0) or (D > (N - 1.) / (2. 
* N) and N % 2 == 1): # NOTE: the upper limit of this sum is taken from Stephens 1965 t = np.arange(np.floor(N * (1 - D)) + 1) y = D + t / N Tt = y**(t - 3) * (y**3 * N - y**2 * t * (3 - 2 / N) + y * t * (t - 1) * (3 - 2 / N) / N - t * (t - 1) * (t - 2) / N**2) term1 = comb(N, t) term2 = (1 - D - t / N)**(N - t - 1) # term1 is formally finite, but is approximated by numpy as np.inf for # large values, so we set them to zero manually when they would be # multiplied by zero anyway term1[(term1 == np.inf) & (term2 == 0)] = 0. final_term = Tt * term1 * term2 return final_term.sum() else: z = D * np.sqrt(N) # When m*z>18.82 (sqrt(-log(finfo(double))/2)), exp(-2m**2z**2) # underflows. Cutting off just before avoids triggering a (pointless) # underflow warning if `under="warn"`. ms = np.arange(1, 18.82 / z) S1 = (2 * (4 * ms**2 * z**2 - 1) * np.exp(-2 * ms**2 * z**2)).sum() S2 = (ms**2 * (4 * ms**2 * z**2 - 3) * np.exp(-2 * ms**2 * z**2)).sum() return S1 - 8 * D / 3 * S2 def kuiper(data, cdf=lambda x: x, args=()): """Compute the Kuiper statistic. Use the Kuiper statistic version of the Kolmogorov-Smirnov test to find the probability that a sample like ``data`` was drawn from the distribution whose CDF is given as ``cdf``. .. warning:: This will not work correctly for distributions that are actually discrete (Poisson, for example). Parameters ---------- data : array-like The data values. cdf : callable A callable to evaluate the CDF of the distribution being tested against. Will be called with a vector of all values at once. The default is a uniform distribution. args : list-like, optional Additional arguments to be supplied to cdf. Returns ------- D : float The raw statistic. fpp : float The probability of a D this large arising with a sample drawn from the distribution whose CDF is cdf. Notes ----- The Kuiper statistic resembles the Kolmogorov-Smirnov test in that it is nonparametric and invariant under reparameterizations of the data. The Kuiper statistic, in addition, is equally sensitive throughout the domain, and it is also invariant under cyclic permutations (making it particularly appropriate for analyzing circular data). Returns (D, fpp), where D is the Kuiper D number and fpp is the probability that a value as large as D would occur if data was drawn from cdf. .. warning:: The fpp is calculated only approximately, and it can be as much as 1.5 times the true value. Stephens 1970 claims this is more effective than the KS at detecting changes in the variance of a distribution; the KS is (he claims) more sensitive at detecting changes in the mean. If cdf was obtained from data by fitting, then fpp is not correct and it will be necessary to do Monte Carlo simulations to interpret D. D should normally be independent of the shape of CDF. References ---------- .. [1] Stephens, M. A., "Use of the Kolmogorov-Smirnov, Cramer-Von Mises and Related Statistics Without Extensive Tables", Journal of the Royal Statistical Society. Series B (Methodological), Vol. 32, No. 1. (1970), pp. 115-122. """ data = np.sort(data) cdfv = cdf(data, *args) N = len(data) D = (np.amax(cdfv - np.arange(N) / float(N)) + np.amax((np.arange(N) + 1) / float(N) - cdfv)) return D, kuiper_false_positive_probability(D, N) def kuiper_two(data1, data2): """Compute the Kuiper statistic to compare two samples. Parameters ---------- data1 : array-like The first set of data values. data2 : array-like The second set of data values. Returns ------- D : float The raw test statistic. 
fpp : float The probability of obtaining two samples this different from the same distribution. .. warning:: The fpp is quite approximate, especially for small samples. """ data1 = np.sort(data1) data2 = np.sort(data2) n1, = data1.shape n2, = data2.shape common_type = np.find_common_type([], [data1.dtype, data2.dtype]) if not (np.issubdtype(common_type, np.number) and not np.issubdtype(common_type, np.complexfloating)): raise ValueError('kuiper_two only accepts real inputs') # nans, if any, are at the end after sorting. if np.isnan(data1[-1]) or np.isnan(data2[-1]): raise ValueError('kuiper_two only accepts non-nan inputs') D = _stats.ks_2samp(np.asarray(data1, common_type), np.asarray(data2, common_type)) Ne = len(data1) * len(data2) / float(len(data1) + len(data2)) return D, kuiper_false_positive_probability(D, Ne) def fold_intervals(intervals): """Fold the weighted intervals to the interval (0,1). Convert a list of intervals (ai, bi, wi) to a list of non-overlapping intervals covering (0,1). Each output interval has a weight equal to the sum of the wis of all the intervals that include it. All intervals are interpreted modulo 1, and weights are accumulated counting multiplicity. This is appropriate, for example, if you have one or more blocks of observation and you want to determine how much observation time was spent on different parts of a system's orbit (the blocks should be converted to units of the orbital period first). Parameters ---------- intervals : list of (3,) tuple For each tuple (ai,bi,wi); ai and bi are the limits of the interval, and wi is the weight to apply to the interval. Returns ------- breaks : (N,) array of float The endpoints of a set of intervals covering [0,1]; breaks[0]=0 and breaks[-1] = 1 weights : (N-1,) array of float The ith element is the sum of number of times the interval breaks[i],breaks[i+1] is included in each interval times the weight associated with that interval. """ r = [] breaks = set() tot = 0 for (a, b, wt) in intervals: tot += (np.ceil(b) - np.floor(a)) * wt fa = a % 1 breaks.add(fa) r.append((0, fa, -wt)) fb = b % 1 breaks.add(fb) r.append((fb, 1, -wt)) breaks.add(0.) breaks.add(1.) breaks = sorted(breaks) breaks_map = dict([(f, i) for (i, f) in enumerate(breaks)]) totals = np.zeros(len(breaks) - 1) totals += tot for (a, b, wt) in r: totals[breaks_map[a]:breaks_map[b]] += wt return np.array(breaks), totals def cdf_from_intervals(breaks, totals): """Construct a callable piecewise-linear CDF from a pair of arrays. Take a pair of arrays in the format returned by fold_intervals and make a callable cumulative distribution function on the interval (0,1). Parameters ---------- breaks : (N,) array of float The boundaries of successive intervals. totals : (N-1,) array of float The weight for each interval. Returns ------- f : callable A cumulative distribution function corresponding to the piecewise-constant probability distribution given by breaks, weights """ if breaks[0] != 0 or breaks[-1] != 1: raise ValueError("Intervals must be restricted to [0,1]") if np.any(np.diff(breaks) <= 0): raise ValueError("Breaks must be strictly increasing") if np.any(totals < 0): raise ValueError( "Total weights in each subinterval must be nonnegative") if np.all(totals == 0): raise ValueError("At least one interval must have positive exposure") b = breaks.copy() c = np.concatenate(((0,), np.cumsum(totals * np.diff(b)))) c /= c[-1] return lambda x: np.interp(x, b, c, 0, 1) def interval_overlap_length(i1, i2): """Compute the length of overlap of two intervals. 
Parameters ---------- i1, i2 : (float, float) The two intervals, (interval 1, interval 2). Returns ------- l : float The length of the overlap between the two intervals. """ (a, b) = i1 (c, d) = i2 if a < c: if b < c: return 0. elif b < d: return b - c else: return d - c elif a < d: if b < d: return b - a else: return d - a else: return 0 def histogram_intervals(n, breaks, totals): """Histogram of a piecewise-constant weight function. This function takes a piecewise-constant weight function and computes the average weight in each histogram bin. Parameters ---------- n : int The number of bins breaks : (N,) array of float Endpoints of the intervals in the PDF totals : (N-1,) array of float Probability densities in each bin Returns ------- h : array of float The average weight for each bin """ h = np.zeros(n) start = breaks[0] for i in range(len(totals)): end = breaks[i + 1] for j in range(n): ol = interval_overlap_length((float(j) / n, float(j + 1) / n), (start, end)) h[j] += ol / (1. / n) * totals[i] start = end return h
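# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): the interval
# helpers above compose with kuiper() for phase-coverage-aware period tests.
# fold_intervals() folds weighted observing windows onto phase (0, 1),
# cdf_from_intervals() turns the result into a piecewise-linear CDF, and
# kuiper() tests event phases against that CDF.  All names are defined above;
# the data below are made up.

if __name__ == '__main__':  # pragma: no cover
    import numpy as np  # local import keeps the sketch self-contained

    # One observing block covering phases 0.2..0.7 with weight 1.
    breaks, totals = fold_intervals([(0.2, 0.7, 1.0)])
    cdf = cdf_from_intervals(breaks, totals)

    # Fake event phases drawn uniformly from the observed window, so they
    # should be consistent with the CDF and yield an unremarkable fpp.
    rng = np.random.default_rng(0)
    phases = rng.uniform(0.2, 0.7, size=200)

    D, fpp = kuiper(phases, cdf)
    print(f'D={D:.3f} fpp={fpp:.3f}')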
# Licensed under a 3-clause BSD style license - see LICENSE.rst import platform import warnings import numpy as np from .index import get_index_by_names from astropy.utils.exceptions import AstropyUserWarning __all__ = ['TableGroups', 'ColumnGroups'] def table_group_by(table, keys): # index copies are unnecessary and slow down _table_group_by with table.index_mode('discard_on_copy'): return _table_group_by(table, keys) def _table_group_by(table, keys): """ Get groups for ``table`` on specified ``keys``. Parameters ---------- table : `Table` Table to group keys : str, list of str, `Table`, or Numpy array Grouping key specifier Returns ------- grouped_table : Table object with groups attr set accordingly """ from .table import Table from .serialize import represent_mixins_as_columns # Pre-convert string to tuple of strings, or Table to the underlying structured array if isinstance(keys, str): keys = (keys,) if isinstance(keys, (list, tuple)): for name in keys: if name not in table.colnames: raise ValueError(f'Table does not have key column {name!r}') if table.masked and np.any(table[name].mask): raise ValueError(f'Missing values in key column {name!r} are not allowed') # Make a column slice of the table without copying table_keys = table.__class__([table[key] for key in keys], copy=False) # If available get a pre-existing index for these columns table_index = get_index_by_names(table, keys) grouped_by_table_cols = True elif isinstance(keys, (np.ndarray, Table)): table_keys = keys if len(table_keys) != len(table): raise ValueError('Input keys array length {} does not match table length {}' .format(len(table_keys), len(table))) table_index = None grouped_by_table_cols = False else: raise TypeError('Keys input must be string, list, tuple, Table or numpy array, but got {}' .format(type(keys))) # If there is not already an available index and table_keys is a Table then ensure # that all cols (including mixins) are in a form that can sorted with the code below. if not table_index and isinstance(table_keys, Table): table_keys = represent_mixins_as_columns(table_keys) # Get the argsort index `idx_sort`, accounting for particulars try: # take advantage of index internal sort if possible if table_index is not None: idx_sort = table_index.sorted_data() else: idx_sort = table_keys.argsort(kind='mergesort') stable_sort = True except TypeError: # Some versions (likely 1.6 and earlier) of numpy don't support # 'mergesort' for all data types. MacOSX (Darwin) doesn't have a stable # sort by default, nor does Windows, while Linux does (or appears to). idx_sort = table_keys.argsort() stable_sort = platform.system() not in ('Darwin', 'Windows') # Finally do the actual sort of table_keys values table_keys = table_keys[idx_sort] # Get all keys diffs = np.concatenate(([True], table_keys[1:] != table_keys[:-1], [True])) indices = np.flatnonzero(diffs) # If the sort is not stable (preserves original table order) then sort idx_sort in # place within each group. if not stable_sort: for i0, i1 in zip(indices[:-1], indices[1:]): idx_sort[i0:i1].sort() # Make a new table and set the _groups to the appropriate TableGroups object. # Take the subset of the original keys at the indices values (group boundaries). 
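    # Illustrative example: with sorted keys [1, 1, 2, 5], diffs is
    # [True, False, True, True, True], indices is [0, 2, 3, 4] (so the groups
    # are rows 0:2, 2:3 and 3:4), and the boundary keys taken below are
    # [1, 2, 5].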
out = table.__class__(table[idx_sort]) out_keys = table_keys[indices[:-1]] if isinstance(out_keys, Table): out_keys.meta['grouped_by_table_cols'] = grouped_by_table_cols out._groups = TableGroups(out, indices=indices, keys=out_keys) return out def column_group_by(column, keys): """ Get groups for ``column`` on specified ``keys`` Parameters ---------- column : Column object Column to group keys : Table or Numpy array of same length as col Grouping key specifier Returns ------- grouped_column : Column object with groups attr set accordingly """ from .table import Table from .serialize import represent_mixins_as_columns if isinstance(keys, Table): keys = represent_mixins_as_columns(keys) keys = keys.as_array() if not isinstance(keys, np.ndarray): raise TypeError(f'Keys input must be numpy array, but got {type(keys)}') if len(keys) != len(column): raise ValueError('Input keys array length {} does not match column length {}' .format(len(keys), len(column))) idx_sort = keys.argsort() keys = keys[idx_sort] # Get all keys diffs = np.concatenate(([True], keys[1:] != keys[:-1], [True])) indices = np.flatnonzero(diffs) # Make a new column and set the _groups to the appropriate ColumnGroups object. # Take the subset of the original keys at the indices values (group boundaries). out = column.__class__(column[idx_sort]) out._groups = ColumnGroups(out, indices=indices, keys=keys[indices[:-1]]) return out class BaseGroups: """ A class to represent groups within a table of heterogeneous data. - ``keys``: key values corresponding to each group - ``indices``: index values in parent table or column corresponding to group boundaries - ``aggregate()``: method to create new table by aggregating within groups """ @property def parent(self): return self.parent_column if isinstance(self, ColumnGroups) else self.parent_table def __iter__(self): self._iter_index = 0 return self def next(self): ii = self._iter_index if ii < len(self.indices) - 1: i0, i1 = self.indices[ii], self.indices[ii + 1] self._iter_index += 1 return self.parent[i0:i1] else: raise StopIteration __next__ = next def __getitem__(self, item): parent = self.parent if isinstance(item, (int, np.integer)): i0, i1 = self.indices[item], self.indices[item + 1] out = parent[i0:i1] out.groups._keys = parent.groups.keys[item] else: indices0, indices1 = self.indices[:-1], self.indices[1:] try: i0s, i1s = indices0[item], indices1[item] except Exception as err: raise TypeError('Index item for groups attribute must be a slice, ' 'numpy mask or int array') from err mask = np.zeros(len(parent), dtype=bool) # Is there a way to vectorize this in numpy? 
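            # One possible vectorized alternative (untested sketch): with
            # lengths = i1s - i0s, the flat row indices of all selected groups are
            #   np.repeat(i0s, lengths) + np.arange(lengths.sum())
            #     - np.repeat(np.cumsum(lengths) - lengths, lengths)
            # and mask[...] = True could then be set in one shot.  The explicit
            # loop below is kept for clarity.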
for i0, i1 in zip(i0s, i1s): mask[i0:i1] = True out = parent[mask] out.groups._keys = parent.groups.keys[item] out.groups._indices = np.concatenate([[0], np.cumsum(i1s - i0s)]) return out def __repr__(self): return f'<{self.__class__.__name__} indices={self.indices}>' def __len__(self): return len(self.indices) - 1 class ColumnGroups(BaseGroups): def __init__(self, parent_column, indices=None, keys=None): self.parent_column = parent_column # parent Column self.parent_table = parent_column.info.parent_table self._indices = indices self._keys = keys @property def indices(self): # If the parent column is in a table then use group indices from table if self.parent_table: return self.parent_table.groups.indices else: if self._indices is None: return np.array([0, len(self.parent_column)]) else: return self._indices @property def keys(self): # If the parent column is in a table then use group indices from table if self.parent_table: return self.parent_table.groups.keys else: return self._keys def aggregate(self, func): from .column import MaskedColumn, Column from astropy.utils.compat import NUMPY_LT_1_20 i0s, i1s = self.indices[:-1], self.indices[1:] par_col = self.parent_column masked = isinstance(par_col, MaskedColumn) reduceat = hasattr(func, 'reduceat') sum_case = func is np.sum mean_case = func is np.mean try: if not masked and (reduceat or sum_case or mean_case): # For numpy < 1.20 there is a bug where reduceat will fail to # raise an exception for mixin columns that do not support the # operation. For details see: # https://github.com/astropy/astropy/pull/12825#issuecomment-1082412447 # Instead we try the function directly with a 2-element version # of the column if NUMPY_LT_1_20 and not isinstance(par_col, Column) and len(par_col) > 0: func(par_col[[0, 0]]) if mean_case: vals = np.add.reduceat(par_col, i0s) / np.diff(self.indices) else: if sum_case: func = np.add vals = func.reduceat(par_col, i0s) else: vals = np.array([func(par_col[i0: i1]) for i0, i1 in zip(i0s, i1s)]) out = par_col.__class__(vals) except Exception as err: raise TypeError("Cannot aggregate column '{}' with type '{}': {}" .format(par_col.info.name, par_col.info.dtype, err)) from err out_info = out.info for attr in ('name', 'unit', 'format', 'description', 'meta'): try: setattr(out_info, attr, getattr(par_col.info, attr)) except AttributeError: pass return out def filter(self, func): """ Filter groups in the Column based on evaluating function ``func`` on each group sub-table. The function which is passed to this method must accept one argument: - ``column`` : `Column` object It must then return either `True` or `False`. As an example, the following will select all column groups with only positive values:: def all_positive(column): if np.any(column < 0): return False return True Parameters ---------- func : function Filter function Returns ------- out : Column New column with the aggregated rows. """ mask = np.empty(len(self), dtype=bool) for i, group_column in enumerate(self): mask[i] = func(group_column) return self[mask] class TableGroups(BaseGroups): def __init__(self, parent_table, indices=None, keys=None): self.parent_table = parent_table # parent Table self._indices = indices self._keys = keys @property def key_colnames(self): """ Return the names of columns in the parent table that were used for grouping. """ # If the table was grouped by key columns *in* the table then treat those columns # differently in aggregation. In this case keys will be a Table with # keys.meta['grouped_by_table_cols'] == True. 
Keys might not be a Table so we # need to handle this. grouped_by_table_cols = getattr(self.keys, 'meta', {}).get('grouped_by_table_cols', False) return self.keys.colnames if grouped_by_table_cols else () @property def indices(self): if self._indices is None: return np.array([0, len(self.parent_table)]) else: return self._indices def aggregate(self, func): """ Aggregate each group in the Table into a single row by applying the reduction function ``func`` to group values in each column. Parameters ---------- func : function Function that reduces an array of values to a single value Returns ------- out : Table New table with the aggregated rows. """ i0s = self.indices[:-1] out_cols = [] parent_table = self.parent_table for col in parent_table.columns.values(): # For key columns just pick off first in each group since they are identical if col.info.name in self.key_colnames: new_col = col.take(i0s) else: try: new_col = col.info.groups.aggregate(func) except TypeError as err: warnings.warn(str(err), AstropyUserWarning) continue out_cols.append(new_col) return parent_table.__class__(out_cols, meta=parent_table.meta) def filter(self, func): """ Filter groups in the Table based on evaluating function ``func`` on each group sub-table. The function which is passed to this method must accept two arguments: - ``table`` : `Table` object - ``key_colnames`` : tuple of column names in ``table`` used as keys for grouping It must then return either `True` or `False`. As an example, the following will select all table groups with only positive values in the non-key columns:: def all_positive(table, key_colnames): colnames = [name for name in table.colnames if name not in key_colnames] for colname in colnames: if np.any(table[colname] < 0): return False return True Parameters ---------- func : function Filter function Returns ------- out : Table New table with the aggregated rows. """ mask = np.empty(len(self), dtype=bool) key_colnames = self.key_colnames for i, group_table in enumerate(self): mask[i] = func(group_table, key_colnames) return self[mask] @property def keys(self): return self._keys
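# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): the machinery
# above is normally reached through Table.group_by(), which calls
# table_group_by() and attaches a TableGroups instance.  The data are made up.

if __name__ == '__main__':  # pragma: no cover
    import numpy as np
    from astropy.table import Table

    t = Table({'key': ['a', 'b', 'a', 'b'], 'val': [1.0, 2.0, 3.0, 4.0]})
    tg = t.group_by('key')
    print(tg.groups.indices)                    # [0 2 4]
    print(tg.groups.keys['key'])                # ['a', 'b']
    print(tg.groups.aggregate(np.mean)['val'])  # per-key means: [2.0, 3.0]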
# Licensed under a 3-clause BSD style license - see LICENSE.rst from .index import SlicedIndex, TableIndices, TableLoc, TableILoc, TableLocIndices import sys from collections import OrderedDict, defaultdict from collections.abc import Mapping import warnings from copy import deepcopy import types import itertools import weakref import numpy as np from numpy import ma from astropy import log from astropy.units import Quantity, QuantityInfo from astropy.utils import isiterable, ShapedLikeNDArray from astropy.utils.console import color_print from astropy.utils.exceptions import AstropyUserWarning from astropy.utils.masked import Masked from astropy.utils.metadata import MetaData, MetaAttribute from astropy.utils.data_info import BaseColumnInfo, MixinInfo, DataInfo from astropy.utils.decorators import format_doc from astropy.io.registry import UnifiedReadWriteMethod from . import groups from .pprint import TableFormatter from .column import (BaseColumn, Column, MaskedColumn, _auto_names, FalseArray, col_copy, _convert_sequence_data_to_array) from .row import Row from .info import TableInfo from .index import Index, _IndexModeContext, get_index from .connect import TableRead, TableWrite from .ndarray_mixin import NdarrayMixin from .mixins.registry import get_mixin_handler from . import conf _implementation_notes = """ This string has informal notes concerning Table implementation for developers. Things to remember: - Table has customizable attributes ColumnClass, Column, MaskedColumn. Table.Column is normally just column.Column (same w/ MaskedColumn) but in theory they can be different. Table.ColumnClass is the default class used to create new non-mixin columns, and this is a function of the Table.masked attribute. Column creation / manipulation in a Table needs to respect these. - Column objects that get inserted into the Table.columns attribute must have the info.parent_table attribute set correctly. Beware just dropping an object into the columns dict since an existing column may be part of another Table and have parent_table set to point at that table. Dropping that column into `columns` of this Table will cause a problem for the old one so the column object needs to be copied (but not necessarily the data). Currently replace_column is always making a copy of both object and data if parent_table is set. This could be improved but requires a generic way to copy a mixin object but not the data. - Be aware of column objects that have indices set. - `cls.ColumnClass` is a property that effectively uses the `masked` attribute to choose either `cls.Column` or `cls.MaskedColumn`. """ __doctest_skip__ = ['Table.read', 'Table.write', 'Table._read', 'Table.convert_bytestring_to_unicode', 'Table.convert_unicode_to_bytestring', ] __doctest_requires__ = {'*pandas': ['pandas>=1.1']} _pprint_docs = """ {__doc__} Parameters ---------- max_lines : int or None Maximum number of lines in table output. max_width : int or None Maximum character width of output. show_name : bool Include a header row for column names. Default is True. show_unit : bool Include a header row for unit. Default is to show a row for units only if one or more columns has a defined value for the unit. show_dtype : bool Include a header row for column dtypes. Default is False. align : str or list or tuple or None Left/right alignment of columns. Default is right (None) for all columns. Other allowed values are '>', '<', '^', and '0=' for right, left, centered, and 0-padded, respectively. 
A list of strings can be provided for alignment of tables with multiple columns. """ _pformat_docs = """ {__doc__} Parameters ---------- max_lines : int or None Maximum number of rows to output max_width : int or None Maximum character width of output show_name : bool Include a header row for column names. Default is True. show_unit : bool Include a header row for unit. Default is to show a row for units only if one or more columns has a defined value for the unit. show_dtype : bool Include a header row for column dtypes. Default is True. html : bool Format the output as an HTML table. Default is False. tableid : str or None An ID tag for the table; only used if html is set. Default is "table{id}", where id is the unique integer id of the table object, id(self) align : str or list or tuple or None Left/right alignment of columns. Default is right (None) for all columns. Other allowed values are '>', '<', '^', and '0=' for right, left, centered, and 0-padded, respectively. A list of strings can be provided for alignment of tables with multiple columns. tableclass : str or list of str or None CSS classes for the table; only used if html is set. Default is None. Returns ------- lines : list Formatted table as a list of strings. """ class TableReplaceWarning(UserWarning): """ Warning class for cases when a table column is replaced via the Table.__setitem__ syntax e.g. t['a'] = val. This does not inherit from AstropyWarning because we want to use stacklevel=3 to show the user where the issue occurred in their code. """ pass def descr(col): """Array-interface compliant full description of a column. This returns a 3-tuple (name, type, shape) that can always be used in a structured array dtype definition. """ col_dtype = 'O' if (col.info.dtype is None) else col.info.dtype col_shape = col.shape[1:] if hasattr(col, 'shape') else () return (col.info.name, col_dtype, col_shape) def has_info_class(obj, cls): """Check if the object's info is an instance of cls.""" # We check info on the class of the instance, since on the instance # itself accessing 'info' has side effects in that it sets # obj.__dict__['info'] if it does not exist already. return isinstance(getattr(obj.__class__, 'info', None), cls) def _get_names_from_list_of_dict(rows): """Return list of column names if ``rows`` is a list of dict that defines table data. If rows is not a list of dict then return None. """ if rows is None: return None names = set() for row in rows: if not isinstance(row, Mapping): return None names.update(row) return list(names) # Note to future maintainers: when transitioning this to dict # be sure to change the OrderedDict ref(s) in Row and in __len__(). class TableColumns(OrderedDict): """OrderedDict subclass for a set of columns. This class enhances item access to provide convenient access to columns by name or index, including slice access. It also handles renaming of columns. The initialization argument ``cols`` can be a list of ``Column`` objects or any structure that is valid for initializing a Python dict. This includes a dict, list of (key, val) tuples or [key, val] lists, etc. Parameters ---------- cols : dict, list, tuple; optional Column objects as data structure that can init dict (see above) """ def __init__(self, cols={}): if isinstance(cols, (list, tuple)): # `cols` should be a list of two-tuples, but it is allowed to have # columns (BaseColumn or mixins) in the list. 
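            # Illustrative: cols=[Column(name='a'), ('b', col_b)] is accepted;
            # the loop below normalises bare columns into (name, column) pairs.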
newcols = [] for col in cols: if has_info_class(col, BaseColumnInfo): newcols.append((col.info.name, col)) else: newcols.append(col) cols = newcols super().__init__(cols) def __getitem__(self, item): """Get items from a TableColumns object. :: tc = TableColumns(cols=[Column(name='a'), Column(name='b'), Column(name='c')]) tc['a'] # Column('a') tc[1] # Column('b') tc['a', 'b'] # <TableColumns names=('a', 'b')> tc[1:3] # <TableColumns names=('b', 'c')> """ if isinstance(item, str): return OrderedDict.__getitem__(self, item) elif isinstance(item, (int, np.integer)): return list(self.values())[item] elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'): return list(self.values())[item.item()] elif isinstance(item, tuple): return self.__class__([self[x] for x in item]) elif isinstance(item, slice): return self.__class__([self[x] for x in list(self)[item]]) else: raise IndexError('Illegal key or index value for {} object' .format(self.__class__.__name__)) def __setitem__(self, item, value, validated=False): """ Set item in this dict instance, but do not allow directly replacing an existing column unless it is already validated (and thus is certain to not corrupt the table). NOTE: it is easily possible to corrupt a table by directly *adding* a new key to the TableColumns attribute of a Table, e.g. ``t.columns['jane'] = 'doe'``. """ if item in self and not validated: raise ValueError("Cannot replace column '{}'. Use Table.replace_column() instead." .format(item)) super().__setitem__(item, value) def __repr__(self): names = (f"'{x}'" for x in self.keys()) return f"<{self.__class__.__name__} names=({','.join(names)})>" def _rename_column(self, name, new_name): if name == new_name: return if new_name in self: raise KeyError(f"Column {new_name} already exists") # Rename column names in pprint include/exclude attributes as needed parent_table = self[name].info.parent_table if parent_table is not None: parent_table.pprint_exclude_names._rename(name, new_name) parent_table.pprint_include_names._rename(name, new_name) mapper = {name: new_name} new_names = [mapper.get(name, name) for name in self] cols = list(self.values()) self.clear() self.update(list(zip(new_names, cols))) def __delitem__(self, name): # Remove column names from pprint include/exclude attributes as needed. # __delitem__ also gets called for pop() and popitem(). parent_table = self[name].info.parent_table if parent_table is not None: # _remove() method does not require that `name` is in the attribute parent_table.pprint_exclude_names._remove(name) parent_table.pprint_include_names._remove(name) return super().__delitem__(name) def isinstance(self, cls): """ Return a list of columns which are instances of the specified classes. Parameters ---------- cls : class or tuple thereof Column class (including mixin) or tuple of Column classes. Returns ------- col_list : list of `Column` List of Column objects which are instances of given classes. """ cols = [col for col in self.values() if isinstance(col, cls)] return cols def not_isinstance(self, cls): """ Return a list of columns which are not instances of the specified classes. Parameters ---------- cls : class or tuple thereof Column class (including mixin) or tuple of Column classes. Returns ------- col_list : list of `Column` List of Column objects which are not instances of given classes. 
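        Examples
        --------
        Select the columns that are not masked (illustrative, assumes an
        existing table ``t``)::

            >>> plain_cols = t.columns.not_isinstance(MaskedColumn)  # doctest: +SKIP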
""" cols = [col for col in self.values() if not isinstance(col, cls)] return cols class TableAttribute(MetaAttribute): """ Descriptor to define a custom attribute for a Table subclass. The value of the ``TableAttribute`` will be stored in a dict named ``__attributes__`` that is stored in the table ``meta``. The attribute can be accessed and set in the usual way, and it can be provided when creating the object. Defining an attribute by this mechanism ensures that it will persist if the table is sliced or serialized, for example as a pickle or ECSV file. See the `~astropy.utils.metadata.MetaAttribute` documentation for additional details. Parameters ---------- default : object Default value for attribute Examples -------- >>> from astropy.table import Table, TableAttribute >>> class MyTable(Table): ... identifier = TableAttribute(default=1) >>> t = MyTable(identifier=10) >>> t.identifier 10 >>> t.meta OrderedDict([('__attributes__', {'identifier': 10})]) """ class PprintIncludeExclude(TableAttribute): """Maintain tuple that controls table column visibility for print output. This is a descriptor that inherits from MetaAttribute so that the attribute value is stored in the table meta['__attributes__']. This gets used for the ``pprint_include_names`` and ``pprint_exclude_names`` Table attributes. """ def __get__(self, instance, owner_cls): """Get the attribute. This normally returns an instance of this class which is stored on the owner object. """ # For getting from class not an instance if instance is None: return self # If not already stored on `instance`, make a copy of the class # descriptor object and put it onto the instance. value = instance.__dict__.get(self.name) if value is None: value = deepcopy(self) instance.__dict__[self.name] = value # We set _instance_ref on every call, since if one makes copies of # instances, this attribute will be copied as well, which will lose the # reference. value._instance_ref = weakref.ref(instance) return value def __set__(self, instance, names): """Set value of ``instance`` attribute to ``names``. Parameters ---------- instance : object Instance that owns the attribute names : None, str, list, tuple Column name(s) to store, or None to clear """ if isinstance(names, str): names = [names] if names is None: # Remove attribute value from the meta['__attributes__'] dict. # Subsequent access will just return None. delattr(instance, self.name) else: # This stores names into instance.meta['__attributes__'] as tuple return super().__set__(instance, tuple(names)) def __call__(self): """Get the value of the attribute. Returns ------- names : None, tuple Include/exclude names """ # Get the value from instance.meta['__attributes__'] instance = self._instance_ref() return super().__get__(instance, instance.__class__) def __repr__(self): if hasattr(self, '_instance_ref'): out = f'<{self.__class__.__name__} name={self.name} value={self()}>' else: out = super().__repr__() return out def _add_remove_setup(self, names): """Common setup for add and remove. - Coerce attribute value to a list - Coerce names into a list - Get the parent table instance """ names = [names] if isinstance(names, str) else list(names) # Get the value. This is the same as self() but we need `instance` here. instance = self._instance_ref() value = super().__get__(instance, instance.__class__) value = [] if value is None else list(value) return instance, names, value def add(self, names): """Add ``names`` to the include/exclude attribute. 
Parameters ---------- names : str, list, tuple Column name(s) to add """ instance, names, value = self._add_remove_setup(names) value.extend(name for name in names if name not in value) super().__set__(instance, tuple(value)) def remove(self, names): """Remove ``names`` from the include/exclude attribute. Parameters ---------- names : str, list, tuple Column name(s) to remove """ self._remove(names, raise_exc=True) def _remove(self, names, raise_exc=False): """Remove ``names`` with optional checking if they exist""" instance, names, value = self._add_remove_setup(names) # Return now if there are no attributes and thus no action to be taken. if not raise_exc and '__attributes__' not in instance.meta: return # Remove one by one, optionally raising an exception if name is missing. for name in names: if name in value: value.remove(name) # Using the list.remove method elif raise_exc: raise ValueError(f'{name} not in {self.name}') # Change to either None or a tuple for storing back to attribute value = None if value == [] else tuple(value) self.__set__(instance, value) def _rename(self, name, new_name): """Rename ``name`` to ``new_name`` if ``name`` is in the list""" names = self() or () if name in names: new_names = list(names) new_names[new_names.index(name)] = new_name self.set(new_names) def set(self, names): """Set value of include/exclude attribute to ``names``. Parameters ---------- names : None, str, list, tuple Column name(s) to store, or None to clear """ class _Context: def __init__(self, descriptor_self): self.descriptor_self = descriptor_self self.names_orig = descriptor_self() def __enter__(self): pass def __exit__(self, type, value, tb): descriptor_self = self.descriptor_self instance = descriptor_self._instance_ref() descriptor_self.__set__(instance, self.names_orig) def __repr__(self): return repr(self.descriptor_self) ctx = _Context(descriptor_self=self) instance = self._instance_ref() self.__set__(instance, names) return ctx class Table: """A class to represent tables of heterogeneous data. `~astropy.table.Table` provides a class for heterogeneous tabular data. A key enhancement provided by the `~astropy.table.Table` class over e.g. a `numpy` structured array is the ability to easily modify the structure of the table by adding or removing columns, or adding new rows of data. In addition table and column metadata are fully supported. `~astropy.table.Table` differs from `~astropy.nddata.NDData` by the assumption that the input data consists of columns of homogeneous data, where each column has a unique identifier and may contain additional metadata such as the data unit, format, and description. See also: https://docs.astropy.org/en/stable/table/ Parameters ---------- data : numpy ndarray, dict, list, table-like object, optional Data to initialize table. masked : bool, optional Specify whether the table is masked. names : list, optional Specify column names. dtype : list, optional Specify column data types. meta : dict, optional Metadata associated with the table. copy : bool, optional Copy the input data. If the input is a Table the ``meta`` is always copied regardless of the ``copy`` parameter. Default is True. rows : numpy ndarray, list of list, optional Row-oriented data for table instead of ``data`` argument. copy_indices : bool, optional Copy any indices in the input data. Default is True. units : list, dict, optional List or dict of units to apply to columns. descriptions : list, dict, optional List or dict of descriptions to apply to columns. 
**kwargs : dict, optional Additional keyword args when converting table-like object. """ meta = MetaData(copy=False) # Define class attributes for core container objects to allow for subclass # customization. Row = Row Column = Column MaskedColumn = MaskedColumn TableColumns = TableColumns TableFormatter = TableFormatter # Unified I/O read and write methods from .connect read = UnifiedReadWriteMethod(TableRead) write = UnifiedReadWriteMethod(TableWrite) pprint_exclude_names = PprintIncludeExclude() pprint_include_names = PprintIncludeExclude() def as_array(self, keep_byteorder=False, names=None): """ Return a new copy of the table in the form of a structured np.ndarray or np.ma.MaskedArray object (as appropriate). Parameters ---------- keep_byteorder : bool, optional By default the returned array has all columns in native byte order. However, if this option is `True` this preserves the byte order of all columns (if any are non-native). names : list, optional: List of column names to include for returned structured array. Default is to include all table columns. Returns ------- table_array : array or `~numpy.ma.MaskedArray` Copy of table as a numpy structured array. ndarray for unmasked or `~numpy.ma.MaskedArray` for masked. """ masked = self.masked or self.has_masked_columns or self.has_masked_values empty_init = ma.empty if masked else np.empty if len(self.columns) == 0: return empty_init(0, dtype=None) dtype = [] cols = self.columns.values() if names is not None: cols = [col for col in cols if col.info.name in names] for col in cols: col_descr = descr(col) if not (col.info.dtype.isnative or keep_byteorder): new_dt = np.dtype(col_descr[1]).newbyteorder('=') col_descr = (col_descr[0], new_dt, col_descr[2]) dtype.append(col_descr) data = empty_init(len(self), dtype=dtype) for col in cols: # When assigning from one array into a field of a structured array, # Numpy will automatically swap those columns to their destination # byte order where applicable data[col.info.name] = col # For masked out, masked mixin columns need to set output mask attribute. if masked and has_info_class(col, MixinInfo) and hasattr(col, 'mask'): data[col.info.name].mask = col.mask return data def __init__(self, data=None, masked=False, names=None, dtype=None, meta=None, copy=True, rows=None, copy_indices=True, units=None, descriptions=None, **kwargs): # Set up a placeholder empty table self._set_masked(masked) self.columns = self.TableColumns() self.formatter = self.TableFormatter() self._copy_indices = True # copy indices from this Table by default self._init_indices = copy_indices # whether to copy indices in init self.primary_key = None # Must copy if dtype are changing if not copy and dtype is not None: raise ValueError('Cannot specify dtype when copy=False') # Specifies list of names found for the case of initializing table with # a list of dict. If data are not list of dict then this is None. names_from_list_of_dict = None # Row-oriented input, e.g. list of lists or list of tuples, list of # dict, Row instance. Set data to something that the subsequent code # will parse correctly. if rows is not None: if data is not None: raise ValueError('Cannot supply both `data` and `rows` values') if isinstance(rows, types.GeneratorType): # Without this then the all(..) 
test below uses up the generator rows = list(rows) # Get column names if `rows` is a list of dict, otherwise this is None names_from_list_of_dict = _get_names_from_list_of_dict(rows) if names_from_list_of_dict: data = rows elif isinstance(rows, self.Row): data = rows else: data = list(zip(*rows)) # Infer the type of the input data and set up the initialization # function, number of columns, and potentially the default col names default_names = None # Handle custom (subclass) table attributes that are stored in meta. # These are defined as class attributes using the TableAttribute # descriptor. Any such attributes get removed from kwargs here and # stored for use after the table is otherwise initialized. Any values # provided via kwargs will have precedence over existing values from # meta (e.g. from data as a Table or meta via kwargs). meta_table_attrs = {} if kwargs: for attr in list(kwargs): descr = getattr(self.__class__, attr, None) if isinstance(descr, TableAttribute): meta_table_attrs[attr] = kwargs.pop(attr) if hasattr(data, '__astropy_table__'): # Data object implements the __astropy_table__ interface method. # Calling that method returns an appropriate instance of # self.__class__ and respects the `copy` arg. The returned # Table object should NOT then be copied. data = data.__astropy_table__(self.__class__, copy, **kwargs) copy = False elif kwargs: raise TypeError('__init__() got unexpected keyword argument {!r}' .format(list(kwargs.keys())[0])) if (isinstance(data, np.ndarray) and data.shape == (0,) and not data.dtype.names): data = None if isinstance(data, self.Row): data = data._table[data._index:data._index + 1] if isinstance(data, (list, tuple)): # Get column names from `data` if it is a list of dict, otherwise this is None. # This might be previously defined if `rows` was supplied as an init arg. names_from_list_of_dict = (names_from_list_of_dict or _get_names_from_list_of_dict(data)) if names_from_list_of_dict: init_func = self._init_from_list_of_dicts n_cols = len(names_from_list_of_dict) else: init_func = self._init_from_list n_cols = len(data) elif isinstance(data, np.ndarray): if data.dtype.names: init_func = self._init_from_ndarray # _struct n_cols = len(data.dtype.names) default_names = data.dtype.names else: init_func = self._init_from_ndarray # _homog if data.shape == (): raise ValueError('Can not initialize a Table with a scalar') elif len(data.shape) == 1: data = data[np.newaxis, :] n_cols = data.shape[1] elif isinstance(data, Mapping): init_func = self._init_from_dict default_names = list(data) n_cols = len(default_names) elif isinstance(data, Table): # If user-input meta is None then use data.meta (if non-trivial) if meta is None and data.meta: # At this point do NOT deepcopy data.meta as this will happen after # table init_func() is called. But for table input the table meta # gets a key copy here if copy=False because later a direct object ref # is used. meta = data.meta if copy else data.meta.copy() # Handle indices on input table. Copy primary key and don't copy indices # if the input Table is in non-copy mode. self.primary_key = data.primary_key self._init_indices = self._init_indices and data._copy_indices # Extract default names, n_cols, and then overwrite ``data`` to be the # table columns so we can use _init_from_list. default_names = data.colnames n_cols = len(default_names) data = list(data.columns.values()) init_func = self._init_from_list elif data is None: if names is None: if dtype is None: # Table was initialized as `t = Table()`. 
Set up for empty # table with names=[], data=[], and n_cols=0. # self._init_from_list() will simply return, giving the # expected empty table. names = [] else: try: # No data nor names but dtype is available. This must be # valid to initialize a structured array. dtype = np.dtype(dtype) names = dtype.names dtype = [dtype[name] for name in names] except Exception: raise ValueError('dtype was specified but could not be ' 'parsed for column names') # names is guaranteed to be set at this point init_func = self._init_from_list n_cols = len(names) data = [[]] * n_cols else: raise ValueError(f'Data type {type(data)} not allowed to init Table') # Set up defaults if names and/or dtype are not specified. # A value of None means the actual value will be inferred # within the appropriate initialization routine, either from # existing specification or auto-generated. if dtype is None: dtype = [None] * n_cols elif isinstance(dtype, np.dtype): if default_names is None: default_names = dtype.names # Convert a numpy dtype input to a list of dtypes for later use. dtype = [dtype[name] for name in dtype.names] if names is None: names = default_names or [None] * n_cols names = [None if name is None else str(name) for name in names] self._check_names_dtype(names, dtype, n_cols) # Finally do the real initialization init_func(data, names, dtype, n_cols, copy) # Set table meta. If copy=True then deepcopy meta otherwise use the # user-supplied meta directly. if meta is not None: self.meta = deepcopy(meta) if copy else meta # Update meta with TableAttributes supplied as kwargs in Table init. # This takes precedence over previously-defined meta. if meta_table_attrs: for attr, value in meta_table_attrs.items(): setattr(self, attr, value) # Whatever happens above, the masked property should be set to a boolean if self.masked not in (None, True, False): raise TypeError("masked property must be None, True or False") self._set_column_attribute('unit', units) self._set_column_attribute('description', descriptions) def _set_column_attribute(self, attr, values): """Set ``attr`` for columns to ``values``, which can be either a dict (keyed by column name) or a dict of name: value pairs. This is used for handling the ``units`` and ``descriptions`` kwargs to ``__init__``. """ if not values: return if isinstance(values, Row): # For a Row object transform to an equivalent dict. 
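            # e.g. a Row with columns 'a' and 'b' becomes {'a': <val>, 'b': <val>},
            # so it is then handled by the generic Mapping logic below (illustrative).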
values = {name: values[name] for name in values.colnames}

        if not isinstance(values, Mapping):
            # Not a mapping, so assume an iterable of values and map it to a
            # dict if it has the right length.
            if len(values) != len(self.columns):
                raise ValueError(f'sequence of {attr} values must match number of columns')
            values = dict(zip(self.colnames, values))

        for name, value in values.items():
            if name not in self.columns:
                raise ValueError(f'invalid column name {name} for setting {attr} attribute')

            # Special case: ignore unit if it is an empty or blank string
            if attr == 'unit' and isinstance(value, str):
                if value.strip() == '':
                    value = None

            if value not in (np.ma.masked, None):
                setattr(self[name].info, attr, value)

    def __getstate__(self):
        columns = OrderedDict((key, col if isinstance(col, BaseColumn) else col_copy(col))
                              for key, col in self.columns.items())
        return (columns, self.meta)

    def __setstate__(self, state):
        columns, meta = state
        self.__init__(columns, meta=meta)

    @property
    def mask(self):
        # Dynamic view of available masks
        if self.masked or self.has_masked_columns or self.has_masked_values:
            mask_table = Table([getattr(col, 'mask', FalseArray(col.shape))
                                for col in self.itercols()],
                               names=self.colnames, copy=False)

            # Set hidden attribute to force inplace setitem so that code like
            # t.mask['a'] = [1, 0, 1] will correctly set the underlying mask.
            # See #5556 for discussion.
            mask_table._setitem_inplace = True
        else:
            mask_table = None

        return mask_table

    @mask.setter
    def mask(self, val):
        self.mask[:] = val

    @property
    def _mask(self):
        """This is needed so that comparison of a masked Table and a
        MaskedArray works.  The requirement comes from numpy.ma.core
        so don't remove this property."""
        return self.as_array().mask

    def filled(self, fill_value=None):
        """Return copy of self, with masked values filled.

        If an input ``fill_value`` is supplied then that value is used for all
        masked entries in the table.  Otherwise the individual ``fill_value``
        defined for each table column is used.

        Parameters
        ----------
        fill_value : object, optional
            If supplied, this ``fill_value`` is used for all masked entries
            in the entire table.

        Returns
        -------
        filled_table : `~astropy.table.Table`
            New table with masked values filled
        """
        if self.masked or self.has_masked_columns or self.has_masked_values:
            # Get new columns with masked values filled, then create Table with those
            # new cols (copy=False) but deepcopy the meta.
            data = [col.filled(fill_value) if hasattr(col, 'filled') else col
                    for col in self.itercols()]
            return self.__class__(data, meta=deepcopy(self.meta), copy=False)
        else:
            # Return copy of the original object.
            return self.copy()

    @property
    def indices(self):
        '''
        Return the indices associated with columns of the table
        as a TableIndices object.
        '''
        lst = []
        for column in self.columns.values():
            for index in column.info.indices:
                if sum([index is x for x in lst]) == 0:  # ensure uniqueness
                    lst.append(index)
        return TableIndices(lst)

    @property
    def loc(self):
        '''
        Return a TableLoc object that can be used for retrieving
        rows by index in a given data range. Note that both loc
        and iloc work only with single-column indices.
        '''
        return TableLoc(self)

    @property
    def loc_indices(self):
        """
        Return a TableLocIndices object that can be used for retrieving
        the row indices corresponding to given table index key value or values.
        """
        return TableLocIndices(self)

    @property
    def iloc(self):
        '''
        Return a TableILoc object that can be used for retrieving
        indexed rows in the order they appear in the index.
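
        A minimal illustrative example (an index must already exist)::

            >>> t = Table({'a': [3, 1, 2]})
            >>> t.add_index('a')
            >>> print(t.iloc[0]['a'])  # smallest 'a' value comes first
            1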
''' return TableILoc(self) def add_index(self, colnames, engine=None, unique=False): ''' Insert a new index among one or more columns. If there are no indices, make this index the primary table index. Parameters ---------- colnames : str or list List of column names (or a single column name) to index engine : type or None Indexing engine class to use, either `~astropy.table.SortedArray`, `~astropy.table.BST`, or `~astropy.table.SCEngine`. If the supplied argument is None (by default), use `~astropy.table.SortedArray`. unique : bool Whether the values of the index must be unique. Default is False. ''' if isinstance(colnames, str): colnames = (colnames,) columns = self.columns[tuple(colnames)].values() # make sure all columns support indexing for col in columns: if not getattr(col.info, '_supports_indexing', False): raise ValueError('Cannot create an index on column "{}", of ' 'type "{}"'.format(col.info.name, type(col))) is_primary = not self.indices index = Index(columns, engine=engine, unique=unique) sliced_index = SlicedIndex(index, slice(0, 0, None), original=True) if is_primary: self.primary_key = colnames for col in columns: col.info.indices.append(sliced_index) def remove_indices(self, colname): ''' Remove all indices involving the given column. If the primary index is removed, the new primary index will be the most recently added remaining index. Parameters ---------- colname : str Name of column ''' col = self.columns[colname] for index in self.indices: try: index.col_position(col.info.name) except ValueError: pass else: for c in index.columns: c.info.indices.remove(index) def index_mode(self, mode): ''' Return a context manager for an indexing mode. Parameters ---------- mode : str Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'. In 'discard_on_copy' mode, indices are not copied whenever columns or tables are copied. In 'freeze' mode, indices are not modified whenever columns are modified; at the exit of the context, indices refresh themselves based on column values. This mode is intended for scenarios in which one intends to make many additions or modifications in an indexed column. In 'copy_on_getitem' mode, indices are copied when taking column slices as well as table slices, so col[i0:i1] will preserve indices. ''' return _IndexModeContext(self, mode) def __array__(self, dtype=None): """Support converting Table to np.array via np.array(table). Coercion to a different dtype via np.array(table, dtype) is not supported and will raise a ValueError. """ if dtype is not None: raise ValueError('Datatype coercion is not allowed') # This limitation is because of the following unexpected result that # should have made a table copy while changing the column names. # # >>> d = astropy.table.Table([[1,2],[3,4]]) # >>> np.array(d, dtype=[('a', 'i8'), ('b', 'i8')]) # array([(0, 0), (0, 0)], # dtype=[('a', '<i8'), ('b', '<i8')]) out = self.as_array() return out.data if isinstance(out, np.ma.MaskedArray) else out def _check_names_dtype(self, names, dtype, n_cols): """Make sure that names and dtype are both iterable and have the same length as data. 
""" for inp_list, inp_str in ((dtype, 'dtype'), (names, 'names')): if not isiterable(inp_list): raise ValueError(f'{inp_str} must be a list or None') if len(names) != n_cols or len(dtype) != n_cols: raise ValueError( 'Arguments "names" and "dtype" must match number of columns') def _init_from_list_of_dicts(self, data, names, dtype, n_cols, copy): """Initialize table from a list of dictionaries representing rows.""" # Define placeholder for missing values as a unique object that cannot # every occur in user data. MISSING = object() # Gather column names that exist in the input `data`. names_from_data = set() for row in data: names_from_data.update(row) if set(data[0].keys()) == names_from_data: names_from_data = list(data[0].keys()) else: names_from_data = sorted(names_from_data) # Note: if set(data[0].keys()) != names_from_data, this will give an # exception later, so NO need to catch here. # Convert list of dict into dict of list (cols), keep track of missing # indexes and put in MISSING placeholders in the `cols` lists. cols = {} missing_indexes = defaultdict(list) for name in names_from_data: cols[name] = [] for ii, row in enumerate(data): try: val = row[name] except KeyError: missing_indexes[name].append(ii) val = MISSING cols[name].append(val) # Fill the missing entries with first values if missing_indexes: for name, indexes in missing_indexes.items(): col = cols[name] first_val = next(val for val in col if val is not MISSING) for index in indexes: col[index] = first_val # prepare initialization if all(name is None for name in names): names = names_from_data self._init_from_dict(cols, names, dtype, n_cols, copy) # Mask the missing values if necessary, converting columns to MaskedColumn # as needed. if missing_indexes: for name, indexes in missing_indexes.items(): col = self[name] # Ensure that any Column subclasses with MISSING values can support # setting masked values. As of astropy 4.0 the test condition below is # always True since _init_from_dict cannot result in mixin columns. if isinstance(col, Column) and not isinstance(col, MaskedColumn): self[name] = self.MaskedColumn(col, copy=False) # Finally do the masking in a mixin-safe way. self[name][indexes] = np.ma.masked return def _init_from_list(self, data, names, dtype, n_cols, copy): """Initialize table from a list of column data. A column can be a Column object, np.ndarray, mixin, or any other iterable object. """ # Special case of initializing an empty table like `t = Table()`. No # action required at this point. if n_cols == 0: return cols = [] default_names = _auto_names(n_cols) for col, name, default_name, dtype in zip(data, names, default_names, dtype): col = self._convert_data_to_col(col, copy, default_name, dtype, name) cols.append(col) self._init_from_cols(cols) def _convert_data_to_col(self, data, copy=True, default_name=None, dtype=None, name=None): """ Convert any allowed sequence data ``col`` to a column object that can be used directly in the self.columns dict. This could be a Column, MaskedColumn, or mixin column. The final column name is determined by:: name or data.info.name or def_name If ``data`` has no ``info`` then ``name = name or def_name``. 
The behavior of ``copy`` for Column objects is: - copy=True: new class instance with a copy of data and deep copy of meta - copy=False: new class instance with same data and a key-only copy of meta For mixin columns: - copy=True: new class instance with copy of data and deep copy of meta - copy=False: original instance (no copy at all) Parameters ---------- data : object (column-like sequence) Input column data copy : bool Make a copy default_name : str Default name dtype : np.dtype or None Data dtype name : str or None Column name Returns ------- col : Column, MaskedColumn, mixin-column type Object that can be used as a column in self """ data_is_mixin = self._is_mixin_for_table(data) masked_col_cls = (self.ColumnClass if issubclass(self.ColumnClass, self.MaskedColumn) else self.MaskedColumn) try: data0_is_mixin = self._is_mixin_for_table(data[0]) except Exception: # Need broad exception, cannot predict what data[0] raises for arbitrary data data0_is_mixin = False # If the data is not an instance of Column or a mixin class, we can # check the registry of mixin 'handlers' to see if the column can be # converted to a mixin class if (handler := get_mixin_handler(data)) is not None: original_data = data data = handler(data) if not (data_is_mixin := self._is_mixin_for_table(data)): fully_qualified_name = (original_data.__class__.__module__ + '.' + original_data.__class__.__name__) raise TypeError('Mixin handler for object of type ' f'{fully_qualified_name} ' 'did not return a valid mixin column') # Structured ndarray gets viewed as a mixin unless already a valid # mixin class if (not isinstance(data, Column) and not data_is_mixin and isinstance(data, np.ndarray) and len(data.dtype) > 1): data = data.view(NdarrayMixin) data_is_mixin = True # Get the final column name using precedence. Some objects may not # have an info attribute. Also avoid creating info as a side effect. if not name: if isinstance(data, Column): name = data.name or default_name elif 'info' in getattr(data, '__dict__', ()): name = data.info.name or default_name else: name = default_name if isinstance(data, Column): # If self.ColumnClass is a subclass of col, then "upgrade" to ColumnClass, # otherwise just use the original class. The most common case is a # table with masked=True and ColumnClass=MaskedColumn. Then a Column # gets upgraded to MaskedColumn, but the converse (pre-4.0) behavior # of downgrading from MaskedColumn to Column (for non-masked table) # does not happen. col_cls = self._get_col_cls_for_table(data) elif data_is_mixin: # Copy the mixin column attributes if they exist since the copy below # may not get this attribute. col = col_copy(data, copy_indices=self._init_indices) if copy else data col.info.name = name return col elif data0_is_mixin: # Handle case of a sequence of a mixin, e.g. [1*u.m, 2*u.m]. try: col = data[0].__class__(data) col.info.name = name return col except Exception: # If that didn't work for some reason, just turn it into np.array of object data = np.array(data, dtype=object) col_cls = self.ColumnClass elif isinstance(data, (np.ma.MaskedArray, Masked)): # Require that col_cls be a subclass of MaskedColumn, remembering # that ColumnClass could be a user-defined subclass (though more-likely # could be MaskedColumn). col_cls = masked_col_cls elif data is None: # Special case for data passed as the None object (for broadcasting # to an object column). Need to turn data into numpy `None` scalar # object, otherwise `Column` interprets data=None as no data instead # of a object column of `None`. 
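            # e.g. ``t['c'] = None`` on a length-2 table is intended to give an
            # object column equal to [None, None] (illustrative).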
data = np.array(None) col_cls = self.ColumnClass elif not hasattr(data, 'dtype'): # `data` is none of the above, convert to numpy array or MaskedArray # assuming only that it is a scalar or sequence or N-d nested # sequence. This function is relatively intricate and tries to # maintain performance for common cases while handling things like # list input with embedded np.ma.masked entries. If `data` is a # scalar then it gets returned unchanged so the original object gets # passed to `Column` later. data = _convert_sequence_data_to_array(data, dtype) copy = False # Already made a copy above col_cls = masked_col_cls if isinstance(data, np.ma.MaskedArray) else self.ColumnClass else: col_cls = self.ColumnClass try: col = col_cls(name=name, data=data, dtype=dtype, copy=copy, copy_indices=self._init_indices) except Exception: # Broad exception class since we don't know what might go wrong raise ValueError('unable to convert data to Column for Table') col = self._convert_col_for_table(col) return col def _init_from_ndarray(self, data, names, dtype, n_cols, copy): """Initialize table from an ndarray structured array""" data_names = data.dtype.names or _auto_names(n_cols) struct = data.dtype.names is not None names = [name or data_names[i] for i, name in enumerate(names)] cols = ([data[name] for name in data_names] if struct else [data[:, i] for i in range(n_cols)]) self._init_from_list(cols, names, dtype, n_cols, copy) def _init_from_dict(self, data, names, dtype, n_cols, copy): """Initialize table from a dictionary of columns""" data_list = [data[name] for name in names] self._init_from_list(data_list, names, dtype, n_cols, copy) def _get_col_cls_for_table(self, col): """Get the correct column class to use for upgrading any Column-like object. For a masked table, ensure any Column-like object is a subclass of the table MaskedColumn. For unmasked table, ensure any MaskedColumn-like object is a subclass of the table MaskedColumn. If not a MaskedColumn, then ensure that any Column-like object is a subclass of the table Column. """ col_cls = col.__class__ if self.masked: if isinstance(col, Column) and not isinstance(col, self.MaskedColumn): col_cls = self.MaskedColumn else: if isinstance(col, MaskedColumn): if not isinstance(col, self.MaskedColumn): col_cls = self.MaskedColumn elif isinstance(col, Column) and not isinstance(col, self.Column): col_cls = self.Column return col_cls def _convert_col_for_table(self, col): """ Make sure that all Column objects have correct base class for this type of Table. For a base Table this most commonly means setting to MaskedColumn if the table is masked. Table subclasses like QTable override this method. """ if isinstance(col, Column) and not isinstance(col, self.ColumnClass): col_cls = self._get_col_cls_for_table(col) if col_cls is not col.__class__: col = col_cls(col, copy=False) return col def _init_from_cols(self, cols): """Initialize table from a list of Column or mixin objects""" lengths = set(len(col) for col in cols) if len(lengths) > 1: raise ValueError(f'Inconsistent data column lengths: {lengths}') # Make sure that all Column-based objects have correct class. For # plain Table this is self.ColumnClass, but for instance QTable will # convert columns with units to a Quantity mixin. newcols = [self._convert_col_for_table(col) for col in cols] self._make_table_from_cols(self, newcols) # Deduplicate indices. 
It may happen that after pickling or when # initing from an existing table that column indices which had been # references to a single index object got *copied* into an independent # object. This results in duplicates which will cause downstream problems. index_dict = {} for col in self.itercols(): for i, index in enumerate(col.info.indices or []): names = tuple(ind_col.info.name for ind_col in index.columns) if names in index_dict: col.info.indices[i] = index_dict[names] else: index_dict[names] = index def _new_from_slice(self, slice_): """Create a new table as a referenced slice from self.""" table = self.__class__(masked=self.masked) if self.meta: table.meta = self.meta.copy() # Shallow copy for slice table.primary_key = self.primary_key newcols = [] for col in self.columns.values(): newcol = col[slice_] # Note in line below, use direct attribute access to col.indices for Column # instances instead of the generic col.info.indices. This saves about 4 usec # per column. if (col if isinstance(col, Column) else col.info).indices: # TODO : as far as I can tell the only purpose of setting _copy_indices # here is to communicate that to the initial test in `slice_indices`. # Why isn't that just sent as an arg to the function? col.info._copy_indices = self._copy_indices newcol = col.info.slice_indices(newcol, slice_, len(col)) # Don't understand why this is forcing a value on the original column. # Normally col.info does not even have a _copy_indices attribute. Tests # still pass if this line is deleted. (Each col.info attribute access # is expensive). col.info._copy_indices = True newcols.append(newcol) self._make_table_from_cols(table, newcols, verify=False, names=self.columns.keys()) return table @staticmethod def _make_table_from_cols(table, cols, verify=True, names=None): """ Make ``table`` in-place so that it represents the given list of ``cols``. """ if names is None: names = [col.info.name for col in cols] # Note: we do not test for len(names) == len(cols) if names is not None. In that # case the function is being called by from "trusted" source (e.g. right above here) # that is assumed to provide valid inputs. In that case verify=False. if verify: if None in names: raise TypeError('Cannot have None for column name') if len(set(names)) != len(names): raise ValueError('Duplicate column names') table.columns = table.TableColumns((name, col) for name, col in zip(names, cols)) for col in cols: table._set_col_parent_table_and_mask(col) def _set_col_parent_table_and_mask(self, col): """ Set ``col.parent_table = self`` and force ``col`` to have ``mask`` attribute if the table is masked and ``col.mask`` does not exist. """ # For Column instances it is much faster to do direct attribute access # instead of going through .info col_info = col if isinstance(col, Column) else col.info col_info.parent_table = self # Legacy behavior for masked table if self.masked and not hasattr(col, 'mask'): col.mask = FalseArray(col.shape) def itercols(self): """ Iterate over the columns of this table. Examples -------- To iterate over the columns of a table:: >>> t = Table([[1], [2]]) >>> for col in t.itercols(): ... print(col) col0 ---- 1 col1 ---- 2 Using ``itercols()`` is similar to ``for col in t.columns.values()`` but is syntactically preferred. 
""" for colname in self.columns: yield self[colname] def _base_repr_(self, html=False, descr_vals=None, max_width=None, tableid=None, show_dtype=True, max_lines=None, tableclass=None): if descr_vals is None: descr_vals = [self.__class__.__name__] if self.masked: descr_vals.append('masked=True') descr_vals.append(f'length={len(self)}') descr = ' '.join(descr_vals) if html: from astropy.utils.xml.writer import xml_escape descr = f'<i>{xml_escape(descr)}</i>\n' else: descr = f'<{descr}>\n' if tableid is None: tableid = f'table{id(self)}' data_lines, outs = self.formatter._pformat_table( self, tableid=tableid, html=html, max_width=max_width, show_name=True, show_unit=None, show_dtype=show_dtype, max_lines=max_lines, tableclass=tableclass) out = descr + '\n'.join(data_lines) return out def _repr_html_(self): out = self._base_repr_(html=True, max_width=-1, tableclass=conf.default_notebook_table_class) # Wrap <table> in <div>. This follows the pattern in pandas and allows # table to be scrollable horizontally in VS Code notebook display. out = f'<div>{out}</div>' return out def __repr__(self): return self._base_repr_(html=False, max_width=None) def __str__(self): return '\n'.join(self.pformat()) def __bytes__(self): return str(self).encode('utf-8') @property def has_mixin_columns(self): """ True if table has any mixin columns (defined as columns that are not Column subclasses). """ return any(has_info_class(col, MixinInfo) for col in self.columns.values()) @property def has_masked_columns(self): """True if table has any ``MaskedColumn`` columns. This does not check for mixin columns that may have masked values, use the ``has_masked_values`` property in that case. """ return any(isinstance(col, MaskedColumn) for col in self.itercols()) @property def has_masked_values(self): """True if column in the table has values which are masked. This may be relatively slow for large tables as it requires checking the mask values of each column. """ for col in self.itercols(): if hasattr(col, 'mask') and np.any(col.mask): return True else: return False def _is_mixin_for_table(self, col): """ Determine if ``col`` should be added to the table directly as a mixin column. """ if isinstance(col, BaseColumn): return False # Is it a mixin but not [Masked]Quantity (which gets converted to # [Masked]Column with unit set). return has_info_class(col, MixinInfo) and not has_info_class(col, QuantityInfo) @format_doc(_pprint_docs) def pprint(self, max_lines=None, max_width=None, show_name=True, show_unit=None, show_dtype=False, align=None): """Print a formatted string representation of the table. If no value of ``max_lines`` is supplied then the height of the screen terminal is used to set ``max_lines``. If the terminal height cannot be determined then the default is taken from the configuration item ``astropy.conf.max_lines``. If a negative value of ``max_lines`` is supplied then there is no line limit applied. The same applies for max_width except the configuration item is ``astropy.conf.max_width``. 
""" lines, outs = self.formatter._pformat_table(self, max_lines, max_width, show_name=show_name, show_unit=show_unit, show_dtype=show_dtype, align=align) if outs['show_length']: lines.append(f'Length = {len(self)} rows') n_header = outs['n_header'] for i, line in enumerate(lines): if i < n_header: color_print(line, 'red') else: print(line) @format_doc(_pprint_docs) def pprint_all(self, max_lines=-1, max_width=-1, show_name=True, show_unit=None, show_dtype=False, align=None): """Print a formatted string representation of the entire table. This method is the same as `astropy.table.Table.pprint` except that the default ``max_lines`` and ``max_width`` are both -1 so that by default the entire table is printed instead of restricting to the size of the screen terminal. """ return self.pprint(max_lines, max_width, show_name, show_unit, show_dtype, align) def _make_index_row_display_table(self, index_row_name): if index_row_name not in self.columns: idx_col = self.ColumnClass(name=index_row_name, data=np.arange(len(self))) return self.__class__([idx_col] + list(self.columns.values()), copy=False) else: return self def show_in_notebook(self, tableid=None, css=None, display_length=50, table_class='astropy-default', show_row_index='idx'): """Render the table in HTML and show it in the IPython notebook. Parameters ---------- tableid : str or None An html ID tag for the table. Default is ``table{id}-XXX``, where id is the unique integer id of the table object, id(self), and XXX is a random number to avoid conflicts when printing the same table multiple times. table_class : str or None A string with a list of HTML classes used to style the table. The special default string ('astropy-default') means that the string will be retrieved from the configuration item ``astropy.table.default_notebook_table_class``. Note that these table classes may make use of bootstrap, as this is loaded with the notebook. See `this page <https://getbootstrap.com/css/#tables>`_ for the list of classes. css : str A valid CSS string declaring the formatting for the table. Defaults to ``astropy.table.jsviewer.DEFAULT_CSS_NB``. display_length : int, optional Number or rows to show. Defaults to 50. show_row_index : str or False If this does not evaluate to False, a column with the given name will be added to the version of the table that gets displayed. This new column shows the index of the row in the table itself, even when the displayed table is re-sorted by another column. Note that if a column with this name already exists, this option will be ignored. Defaults to "idx". Notes ----- Currently, unlike `show_in_browser` (with ``jsviewer=True``), this method needs to access online javascript code repositories. This is due to modern browsers' limitations on accessing local files. Hence, if you call this method while offline (and don't have a cached version of jquery and jquery.dataTables), you will not get the jsviewer features. 
""" from .jsviewer import JSViewer from IPython.display import HTML if tableid is None: tableid = f'table{id(self)}-{np.random.randint(1, 1e6)}' jsv = JSViewer(display_length=display_length) if show_row_index: display_table = self._make_index_row_display_table(show_row_index) else: display_table = self if table_class == 'astropy-default': table_class = conf.default_notebook_table_class html = display_table._base_repr_(html=True, max_width=-1, tableid=tableid, max_lines=-1, show_dtype=False, tableclass=table_class) columns = display_table.columns.values() sortable_columns = [i for i, col in enumerate(columns) if col.info.dtype.kind in 'iufc'] html += jsv.ipynb(tableid, css=css, sort_columns=sortable_columns) return HTML(html) def show_in_browser(self, max_lines=5000, jsviewer=False, browser='default', jskwargs={'use_local_files': True}, tableid=None, table_class="display compact", css=None, show_row_index='idx'): """Render the table in HTML and show it in a web browser. Parameters ---------- max_lines : int Maximum number of rows to export to the table (set low by default to avoid memory issues, since the browser view requires duplicating the table in memory). A negative value of ``max_lines`` indicates no row limit. jsviewer : bool If `True`, prepends some javascript headers so that the table is rendered as a `DataTables <https://datatables.net>`_ data table. This allows in-browser searching & sorting. browser : str Any legal browser name, e.g. ``'firefox'``, ``'chrome'``, ``'safari'`` (for mac, you may need to use ``'open -a "/Applications/Google Chrome.app" {}'`` for Chrome). If ``'default'``, will use the system default browser. jskwargs : dict Passed to the `astropy.table.JSViewer` init. Defaults to ``{'use_local_files': True}`` which means that the JavaScript libraries will be served from local copies. tableid : str or None An html ID tag for the table. Default is ``table{id}``, where id is the unique integer id of the table object, id(self). table_class : str or None A string with a list of HTML classes used to style the table. Default is "display compact", and other possible values can be found in https://www.datatables.net/manual/styling/classes css : str A valid CSS string declaring the formatting for the table. Defaults to ``astropy.table.jsviewer.DEFAULT_CSS``. show_row_index : str or False If this does not evaluate to False, a column with the given name will be added to the version of the table that gets displayed. This new column shows the index of the row in the table itself, even when the displayed table is re-sorted by another column. Note that if a column with this name already exists, this option will be ignored. Defaults to "idx". """ import os import webbrowser import tempfile from .jsviewer import DEFAULT_CSS from urllib.parse import urljoin from urllib.request import pathname2url if css is None: css = DEFAULT_CSS # We can't use NamedTemporaryFile here because it gets deleted as # soon as it gets garbage collected. 
tmpdir = tempfile.mkdtemp() path = os.path.join(tmpdir, 'table.html') with open(path, 'w') as tmp: if jsviewer: if show_row_index: display_table = self._make_index_row_display_table(show_row_index) else: display_table = self display_table.write(tmp, format='jsviewer', css=css, max_lines=max_lines, jskwargs=jskwargs, table_id=tableid, table_class=table_class) else: self.write(tmp, format='html') try: br = webbrowser.get(None if browser == 'default' else browser) except webbrowser.Error: log.error(f"Browser '{browser}' not found.") else: br.open(urljoin('file:', pathname2url(path))) @format_doc(_pformat_docs, id="{id}") def pformat(self, max_lines=None, max_width=None, show_name=True, show_unit=None, show_dtype=False, html=False, tableid=None, align=None, tableclass=None): """Return a list of lines for the formatted string representation of the table. If no value of ``max_lines`` is supplied then the height of the screen terminal is used to set ``max_lines``. If the terminal height cannot be determined then the default is taken from the configuration item ``astropy.conf.max_lines``. If a negative value of ``max_lines`` is supplied then there is no line limit applied. The same applies for ``max_width`` except the configuration item is ``astropy.conf.max_width``. """ lines, outs = self.formatter._pformat_table( self, max_lines, max_width, show_name=show_name, show_unit=show_unit, show_dtype=show_dtype, html=html, tableid=tableid, tableclass=tableclass, align=align) if outs['show_length']: lines.append(f'Length = {len(self)} rows') return lines @format_doc(_pformat_docs, id="{id}") def pformat_all(self, max_lines=-1, max_width=-1, show_name=True, show_unit=None, show_dtype=False, html=False, tableid=None, align=None, tableclass=None): """Return a list of lines for the formatted string representation of the entire table. If no value of ``max_lines`` is supplied then the height of the screen terminal is used to set ``max_lines``. If the terminal height cannot be determined then the default is taken from the configuration item ``astropy.conf.max_lines``. If a negative value of ``max_lines`` is supplied then there is no line limit applied. The same applies for ``max_width`` except the configuration item is ``astropy.conf.max_width``. """ return self.pformat(max_lines, max_width, show_name, show_unit, show_dtype, html, tableid, align, tableclass) def more(self, max_lines=None, max_width=None, show_name=True, show_unit=None, show_dtype=False): """Interactively browse table with a paging interface. Supported keys:: f, <space> : forward one page b : back one page r : refresh same page n : next row p : previous row < : go to beginning > : go to end q : quit browsing h : print this help Parameters ---------- max_lines : int Maximum number of lines in table output max_width : int or None Maximum character width of output show_name : bool Include a header row for column names. Default is True. show_unit : bool Include a header row for unit. Default is to show a row for units only if one or more columns has a defined value for the unit. show_dtype : bool Include a header row for column dtypes. Default is False. 
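
        A minimal illustrative call (interactive paging, so it is not run
        here)::

            >>> t = Table({'a': [1, 2, 3]})
            >>> t.more(max_lines=10)  # doctest: +SKIP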
""" self.formatter._more_tabcol(self, max_lines, max_width, show_name=show_name, show_unit=show_unit, show_dtype=show_dtype) def __getitem__(self, item): if isinstance(item, str): return self.columns[item] elif isinstance(item, (int, np.integer)): return self.Row(self, item) elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'): return self.Row(self, item.item()) elif self._is_list_or_tuple_of_str(item): out = self.__class__([self[x] for x in item], copy_indices=self._copy_indices) out._groups = groups.TableGroups(out, indices=self.groups._indices, keys=self.groups._keys) out.meta = self.meta.copy() # Shallow copy for meta return out elif ((isinstance(item, np.ndarray) and item.size == 0) or (isinstance(item, (tuple, list)) and not item)): # If item is an empty array/list/tuple then return the table with no rows return self._new_from_slice([]) elif (isinstance(item, slice) or isinstance(item, np.ndarray) or isinstance(item, list) or isinstance(item, tuple) and all(isinstance(x, np.ndarray) for x in item)): # here for the many ways to give a slice; a tuple of ndarray # is produced by np.where, as in t[np.where(t['a'] > 2)] # For all, a new table is constructed with slice of all columns return self._new_from_slice(item) else: raise ValueError(f'Illegal type {type(item)} for table item access') def __setitem__(self, item, value): # If the item is a string then it must be the name of a column. # If that column doesn't already exist then create it now. if isinstance(item, str) and item not in self.colnames: self.add_column(value, name=item, copy=True) else: n_cols = len(self.columns) if isinstance(item, str): # Set an existing column by first trying to replace, and if # this fails do an in-place update. See definition of mask # property for discussion of the _setitem_inplace attribute. 
if (not getattr(self, '_setitem_inplace', False) and not conf.replace_inplace): try: self._replace_column_warnings(item, value) return except Exception: pass self.columns[item][:] = value elif isinstance(item, (int, np.integer)): self._set_row(idx=item, colnames=self.colnames, vals=value) elif (isinstance(item, slice) or isinstance(item, np.ndarray) or isinstance(item, list) or (isinstance(item, tuple) # output from np.where and all(isinstance(x, np.ndarray) for x in item))): if isinstance(value, Table): vals = (col for col in value.columns.values()) elif isinstance(value, np.ndarray) and value.dtype.names: vals = (value[name] for name in value.dtype.names) elif np.isscalar(value): vals = itertools.repeat(value, n_cols) else: # Assume this is an iterable that will work if len(value) != n_cols: raise ValueError('Right side value needs {} elements (one for each column)' .format(n_cols)) vals = value for col, val in zip(self.columns.values(), vals): col[item] = val else: raise ValueError(f'Illegal type {type(item)} for table item access') def __delitem__(self, item): if isinstance(item, str): self.remove_column(item) elif isinstance(item, (int, np.integer)): self.remove_row(item) elif (isinstance(item, (list, tuple, np.ndarray)) and all(isinstance(x, str) for x in item)): self.remove_columns(item) elif (isinstance(item, (list, np.ndarray)) and np.asarray(item).dtype.kind == 'i'): self.remove_rows(item) elif isinstance(item, slice): self.remove_rows(item) else: raise IndexError('illegal key or index value') def _ipython_key_completions_(self): return self.colnames def field(self, item): """Return column[item] for recarray compatibility.""" return self.columns[item] @property def masked(self): return self._masked @masked.setter def masked(self, masked): raise Exception('Masked attribute is read-only (use t = Table(t, masked=True)' ' to convert to a masked table)') def _set_masked(self, masked): """ Set the table masked property. Parameters ---------- masked : bool State of table masking (`True` or `False`) """ if masked in [True, False, None]: self._masked = masked else: raise ValueError("masked should be one of True, False, None") self._column_class = self.MaskedColumn if self._masked else self.Column @property def ColumnClass(self): if self._column_class is None: return self.Column else: return self._column_class @property def dtype(self): return np.dtype([descr(col) for col in self.columns.values()]) @property def colnames(self): return list(self.columns.keys()) @staticmethod def _is_list_or_tuple_of_str(names): """Check that ``names`` is a tuple or list of strings""" return (isinstance(names, (tuple, list)) and names and all(isinstance(x, str) for x in names)) def keys(self): return list(self.columns.keys()) def values(self): return self.columns.values() def items(self): return self.columns.items() def __len__(self): # For performance reasons (esp. in Row) cache the first column name # and use that subsequently for the table length. If might not be # available yet or the column might be gone now, in which case # try again in the except block. try: return len(OrderedDict.__getitem__(self.columns, self._first_colname)) except (AttributeError, KeyError): if len(self.columns) == 0: return 0 # Get the first column name self._first_colname = next(iter(self.columns)) return len(self.columns[self._first_colname]) def index_column(self, name): """ Return the positional index of column ``name``. 
Parameters ---------- name : str column name Returns ------- index : int Positional index of column ``name``. Examples -------- Create a table with three columns 'a', 'b' and 'c':: >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], ... names=('a', 'b', 'c')) >>> print(t) a b c --- --- --- 1 0.1 x 2 0.2 y 3 0.3 z Get index of column 'b' of the table:: >>> t.index_column('b') 1 """ try: return self.colnames.index(name) except ValueError: raise ValueError(f"Column {name} does not exist") def add_column(self, col, index=None, name=None, rename_duplicate=False, copy=True, default_name=None): """ Add a new column to the table using ``col`` as input. If ``index`` is supplied then insert column before ``index`` position in the list of columns, otherwise append column to the end of the list. The ``col`` input can be any data object which is acceptable as a `~astropy.table.Table` column object or can be converted. This includes mixin columns and scalar or length=1 objects which get broadcast to match the table length. To add several columns at once use ``add_columns()`` or simply call ``add_column()`` for each one. There is very little performance difference in the two approaches. Parameters ---------- col : object Data object for the new column index : int or None Insert column before this position or at end (default). name : str Column name rename_duplicate : bool Uniquify column name if it already exist. Default is False. copy : bool Make a copy of the new column. Default is True. default_name : str or None Name to use if both ``name`` and ``col.info.name`` are not available. Defaults to ``col{number_of_columns}``. Examples -------- Create a table with two columns 'a' and 'b', then create a third column 'c' and append it to the end of the table:: >>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b')) >>> col_c = Column(name='c', data=['x', 'y']) >>> t.add_column(col_c) >>> print(t) a b c --- --- --- 1 0.1 x 2 0.2 y Add column 'd' at position 1. Note that the column is inserted before the given index:: >>> t.add_column(['a', 'b'], name='d', index=1) >>> print(t) a d b c --- --- --- --- 1 a 0.1 x 2 b 0.2 y Add second column named 'b' with rename_duplicate:: >>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b')) >>> t.add_column(1.1, name='b', rename_duplicate=True) >>> print(t) a b b_1 --- --- --- 1 0.1 1.1 2 0.2 1.1 Add an unnamed column or mixin object in the table using a default name or by specifying an explicit name with ``name``. Name can also be overridden:: >>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b')) >>> t.add_column(['a', 'b']) >>> t.add_column(col_c, name='d') >>> print(t) a b col2 d --- --- ---- --- 1 0.1 a x 2 0.2 b y """ if default_name is None: default_name = f'col{len(self.columns)}' # Convert col data to acceptable object for insertion into self.columns. # Note that along with the lines above and below, this allows broadcasting # of scalars to the correct shape for adding to table. col = self._convert_data_to_col(col, name=name, copy=copy, default_name=default_name) # Assigning a scalar column to an empty table should result in an # exception (see #3811). if col.shape == () and len(self) == 0: raise TypeError('Empty table cannot have column set to scalar value') # Make col data shape correct for scalars. The second test is to allow # broadcasting an N-d element to a column, e.g. t['new'] = [[1, 2]]. 
elif (col.shape == () or col.shape[0] == 1) and len(self) > 0: new_shape = (len(self),) + getattr(col, 'shape', ())[1:] if isinstance(col, np.ndarray): col = np.broadcast_to(col, shape=new_shape, subok=True) elif isinstance(col, ShapedLikeNDArray): col = col._apply(np.broadcast_to, shape=new_shape, subok=True) # broadcast_to() results in a read-only array. Apparently it only changes # the view to look like the broadcasted array. So copy. col = col_copy(col) name = col.info.name # Ensure that new column is the right length if len(self.columns) > 0 and len(col) != len(self): raise ValueError('Inconsistent data column lengths') if rename_duplicate: orig_name = name i = 1 while name in self.columns: # Iterate until a unique name is found name = orig_name + '_' + str(i) i += 1 col.info.name = name # Set col parent_table weakref and ensure col has mask attribute if table.masked self._set_col_parent_table_and_mask(col) # Add new column as last column self.columns[name] = col if index is not None: # Move the other cols to the right of the new one move_names = self.colnames[index:-1] for move_name in move_names: self.columns.move_to_end(move_name, last=True) def add_columns(self, cols, indexes=None, names=None, copy=True, rename_duplicate=False): """ Add a list of new columns the table using ``cols`` data objects. If a corresponding list of ``indexes`` is supplied then insert column before each ``index`` position in the *original* list of columns, otherwise append columns to the end of the list. The ``cols`` input can include any data objects which are acceptable as `~astropy.table.Table` column objects or can be converted. This includes mixin columns and scalar or length=1 objects which get broadcast to match the table length. From a performance perspective there is little difference between calling this method once or looping over the new columns and calling ``add_column()`` for each column. Parameters ---------- cols : list of object List of data objects for the new columns indexes : list of int or None Insert column before this position or at end (default). names : list of str Column names copy : bool Make a copy of the new columns. Default is True. rename_duplicate : bool Uniquify new column names if they duplicate the existing ones. Default is False. See Also -------- astropy.table.hstack, update, replace_column Examples -------- Create a table with two columns 'a' and 'b', then create columns 'c' and 'd' and append them to the end of the table:: >>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b')) >>> col_c = Column(name='c', data=['x', 'y']) >>> col_d = Column(name='d', data=['u', 'v']) >>> t.add_columns([col_c, col_d]) >>> print(t) a b c d --- --- --- --- 1 0.1 x u 2 0.2 y v Add column 'c' at position 0 and column 'd' at position 1. Note that the columns are inserted before the given position:: >>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b')) >>> t.add_columns([['x', 'y'], ['u', 'v']], names=['c', 'd'], ... indexes=[0, 1]) >>> print(t) c a d b --- --- --- --- x 1 u 0.1 y 2 v 0.2 Add second column 'b' and column 'c' with ``rename_duplicate``:: >>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b')) >>> t.add_columns([[1.1, 1.2], ['x', 'y']], names=('b', 'c'), ... rename_duplicate=True) >>> print(t) a b b_1 c --- --- --- --- 1 0.1 1.1 x 2 0.2 1.2 y Add unnamed columns or mixin objects in the table using default names or by specifying explicit names with ``names``. 
Names can also be overridden:: >>> t = Table() >>> col_b = Column(name='b', data=['u', 'v']) >>> t.add_columns([[1, 2], col_b]) >>> t.add_columns([[3, 4], col_b], names=['c', 'd']) >>> print(t) col0 b c d ---- --- --- --- 1 u 3 u 2 v 4 v """ if indexes is None: indexes = [len(self.columns)] * len(cols) elif len(indexes) != len(cols): raise ValueError('Number of indexes must match number of cols') if names is None: names = (None,) * len(cols) elif len(names) != len(cols): raise ValueError('Number of names must match number of cols') default_names = [f'col{ii + len(self.columns)}' for ii in range(len(cols))] for ii in reversed(np.argsort(indexes)): self.add_column(cols[ii], index=indexes[ii], name=names[ii], default_name=default_names[ii], rename_duplicate=rename_duplicate, copy=copy) def _replace_column_warnings(self, name, col): """ Same as replace_column but issues warnings under various circumstances. """ warns = conf.replace_warnings refcount = None old_col = None if 'refcount' in warns and name in self.colnames: refcount = sys.getrefcount(self[name]) if name in self.colnames: old_col = self[name] # This may raise an exception (e.g. t['a'] = 1) in which case none of # the downstream code runs. self.replace_column(name, col) if 'always' in warns: warnings.warn(f"replaced column '{name}'", TableReplaceWarning, stacklevel=3) if 'slice' in warns: try: # Check for ndarray-subclass slice. An unsliced instance # has an ndarray for the base while sliced has the same class # as parent. if isinstance(old_col.base, old_col.__class__): msg = ("replaced column '{}' which looks like an array slice. " "The new column no longer shares memory with the " "original array.".format(name)) warnings.warn(msg, TableReplaceWarning, stacklevel=3) except AttributeError: pass if 'refcount' in warns: # Did reference count change? new_refcount = sys.getrefcount(self[name]) if refcount != new_refcount: msg = ("replaced column '{}' and the number of references " "to the column changed.".format(name)) warnings.warn(msg, TableReplaceWarning, stacklevel=3) if 'attributes' in warns: # Any of the standard column attributes changed? changed_attrs = [] new_col = self[name] # Check base DataInfo attributes that any column will have for attr in DataInfo.attr_names: if getattr(old_col.info, attr) != getattr(new_col.info, attr): changed_attrs.append(attr) if changed_attrs: msg = ("replaced column '{}' and column attributes {} changed." .format(name, changed_attrs)) warnings.warn(msg, TableReplaceWarning, stacklevel=3) def replace_column(self, name, col, copy=True): """ Replace column ``name`` with the new ``col`` object. The behavior of ``copy`` for Column objects is: - copy=True: new class instance with a copy of data and deep copy of meta - copy=False: new class instance with same data and a key-only copy of meta For mixin columns: - copy=True: new class instance with copy of data and deep copy of meta - copy=False: original instance (no copy at all) Parameters ---------- name : str Name of column to replace col : `~astropy.table.Column` or `~numpy.ndarray` or sequence New column object to replace the existing column. 
copy : bool Make copy of the input ``col``, default=True See Also -------- add_columns, astropy.table.hstack, update Examples -------- Replace column 'a' with a float version of itself:: >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b')) >>> float_a = t['a'].astype(float) >>> t.replace_column('a', float_a) """ if name not in self.colnames: raise ValueError(f'column name {name} is not in the table') if self[name].info.indices: raise ValueError('cannot replace a table index column') col = self._convert_data_to_col(col, name=name, copy=copy) self._set_col_parent_table_and_mask(col) # Ensure that new column is the right length, unless it is the only column # in which case re-sizing is allowed. if len(self.columns) > 1 and len(col) != len(self[name]): raise ValueError('length of new column must match table length') self.columns.__setitem__(name, col, validated=True) def remove_row(self, index): """ Remove a row from the table. Parameters ---------- index : int Index of row to remove Examples -------- Create a table with three columns 'a', 'b' and 'c':: >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], ... names=('a', 'b', 'c')) >>> print(t) a b c --- --- --- 1 0.1 x 2 0.2 y 3 0.3 z Remove row 1 from the table:: >>> t.remove_row(1) >>> print(t) a b c --- --- --- 1 0.1 x 3 0.3 z To remove several rows at the same time use remove_rows. """ # check the index against the types that work with np.delete if not isinstance(index, (int, np.integer)): raise TypeError("Row index must be an integer") self.remove_rows(index) def remove_rows(self, row_specifier): """ Remove rows from the table. Parameters ---------- row_specifier : slice or int or array of int Specification for rows to remove Examples -------- Create a table with three columns 'a', 'b' and 'c':: >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], ... names=('a', 'b', 'c')) >>> print(t) a b c --- --- --- 1 0.1 x 2 0.2 y 3 0.3 z Remove rows 0 and 2 from the table:: >>> t.remove_rows([0, 2]) >>> print(t) a b c --- --- --- 2 0.2 y Note that there are no warnings if the slice operator extends outside the data:: >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], ... names=('a', 'b', 'c')) >>> t.remove_rows(slice(10, 20, 1)) >>> print(t) a b c --- --- --- 1 0.1 x 2 0.2 y 3 0.3 z """ # Update indices for index in self.indices: index.remove_rows(row_specifier) keep_mask = np.ones(len(self), dtype=bool) keep_mask[row_specifier] = False columns = self.TableColumns() for name, col in self.columns.items(): newcol = col[keep_mask] newcol.info.parent_table = self columns[name] = newcol self._replace_cols(columns) # Revert groups to default (ungrouped) state if hasattr(self, '_groups'): del self._groups def iterrows(self, *names): """ Iterate over rows of table returning a tuple of values for each row. This method is especially useful when only a subset of columns are needed. The ``iterrows`` method can be substantially faster than using the standard Table row iteration (e.g. ``for row in tbl:``), since that returns a new ``~astropy.table.Row`` object for each row and accessing a column in that row (e.g. ``row['col0']``) is slower than tuple access. Parameters ---------- names : list List of column names (default to all columns if no names provided) Returns ------- rows : iterable Iterator returns tuples of row values Examples -------- Create a table with three columns 'a', 'b' and 'c':: >>> t = Table({'a': [1, 2, 3], ... 'b': [1.0, 2.5, 3.0], ... 
'c': ['x', 'y', 'z']}) To iterate row-wise using column names:: >>> for a, c in t.iterrows('a', 'c'): ... print(a, c) 1 x 2 y 3 z """ if len(names) == 0: names = self.colnames else: for name in names: if name not in self.colnames: raise ValueError(f'{name} is not a valid column name') cols = (self[name] for name in names) out = zip(*cols) return out def _set_of_names_in_colnames(self, names): """Return ``names`` as a set if valid, or raise a `KeyError`. ``names`` is valid if all elements in it are in ``self.colnames``. If ``names`` is a string then it is interpreted as a single column name. """ names = {names} if isinstance(names, str) else set(names) invalid_names = names.difference(self.colnames) if len(invalid_names) == 1: raise KeyError(f'column "{invalid_names.pop()}" does not exist') elif len(invalid_names) > 1: raise KeyError(f'columns {invalid_names} do not exist') return names def remove_column(self, name): """ Remove a column from the table. This can also be done with:: del table[name] Parameters ---------- name : str Name of column to remove Examples -------- Create a table with three columns 'a', 'b' and 'c':: >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], ... names=('a', 'b', 'c')) >>> print(t) a b c --- --- --- 1 0.1 x 2 0.2 y 3 0.3 z Remove column 'b' from the table:: >>> t.remove_column('b') >>> print(t) a c --- --- 1 x 2 y 3 z To remove several columns at the same time use remove_columns. """ self.remove_columns([name]) def remove_columns(self, names): ''' Remove several columns from the table. Parameters ---------- names : str or iterable of str Names of the columns to remove Examples -------- Create a table with three columns 'a', 'b' and 'c':: >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], ... names=('a', 'b', 'c')) >>> print(t) a b c --- --- --- 1 0.1 x 2 0.2 y 3 0.3 z Remove columns 'b' and 'c' from the table:: >>> t.remove_columns(['b', 'c']) >>> print(t) a --- 1 2 3 Specifying only a single column also works. Remove column 'b' from the table:: >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], ... names=('a', 'b', 'c')) >>> t.remove_columns('b') >>> print(t) a c --- --- 1 x 2 y 3 z This gives the same as using remove_column. ''' for name in self._set_of_names_in_colnames(names): self.columns.pop(name) def _convert_string_dtype(self, in_kind, out_kind, encode_decode_func): """ Convert string-like columns to/from bytestring and unicode (internal only). Parameters ---------- in_kind : str Input dtype.kind out_kind : str Output dtype.kind """ for col in self.itercols(): if col.dtype.kind == in_kind: try: # This requires ASCII and is faster by a factor of up to ~8, so # try that first. newcol = col.__class__(col, dtype=out_kind) except (UnicodeEncodeError, UnicodeDecodeError): newcol = col.__class__(encode_decode_func(col, 'utf-8')) # Quasi-manually copy info attributes. Unfortunately # DataInfo.__set__ does not do the right thing in this case # so newcol.info = col.info does not get the old info attributes. for attr in col.info.attr_names - col.info._attrs_no_copy - set(['dtype']): value = deepcopy(getattr(col.info, attr)) setattr(newcol.info, attr, value) self[col.name] = newcol def convert_bytestring_to_unicode(self): """ Convert bytestring columns (dtype.kind='S') to unicode (dtype.kind='U') using UTF-8 encoding. Internally this changes string columns to represent each character in the string with a 4-byte UCS-4 equivalent, so it is inefficient for memory but allows scripts to manipulate string arrays with natural syntax. 
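
        A minimal illustrative example::

            >>> t = Table({'a': [b'x', b'y']})
            >>> t['a'].dtype.kind
            'S'
            >>> t.convert_bytestring_to_unicode()
            >>> t['a'].dtype.kind
            'U'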
""" self._convert_string_dtype('S', 'U', np.char.decode) def convert_unicode_to_bytestring(self): """ Convert unicode columns (dtype.kind='U') to bytestring (dtype.kind='S') using UTF-8 encoding. When exporting a unicode string array to a file, it may be desirable to encode unicode columns as bytestrings. """ self._convert_string_dtype('U', 'S', np.char.encode) def keep_columns(self, names): ''' Keep only the columns specified (remove the others). Parameters ---------- names : str or iterable of str The columns to keep. All other columns will be removed. Examples -------- Create a table with three columns 'a', 'b' and 'c':: >>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']], ... names=('a', 'b', 'c')) >>> print(t) a b c --- --- --- 1 0.1 x 2 0.2 y 3 0.3 z Keep only column 'a' of the table:: >>> t.keep_columns('a') >>> print(t) a --- 1 2 3 Keep columns 'a' and 'c' of the table:: >>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']], ... names=('a', 'b', 'c')) >>> t.keep_columns(['a', 'c']) >>> print(t) a c --- --- 1 x 2 y 3 z ''' names = self._set_of_names_in_colnames(names) for colname in self.colnames: if colname not in names: self.columns.pop(colname) def rename_column(self, name, new_name): ''' Rename a column. This can also be done directly with by setting the ``name`` attribute for a column:: table[name].name = new_name TODO: this won't work for mixins Parameters ---------- name : str The current name of the column. new_name : str The new name for the column Examples -------- Create a table with three columns 'a', 'b' and 'c':: >>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c')) >>> print(t) a b c --- --- --- 1 3 5 2 4 6 Renaming column 'a' to 'aa':: >>> t.rename_column('a' , 'aa') >>> print(t) aa b c --- --- --- 1 3 5 2 4 6 ''' if name not in self.keys(): raise KeyError(f"Column {name} does not exist") self.columns[name].info.name = new_name def rename_columns(self, names, new_names): ''' Rename multiple columns. Parameters ---------- names : list, tuple A list or tuple of existing column names. new_names : list, tuple A list or tuple of new column names. Examples -------- Create a table with three columns 'a', 'b', 'c':: >>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c')) >>> print(t) a b c --- --- --- 1 3 5 2 4 6 Renaming columns 'a' to 'aa' and 'b' to 'bb':: >>> names = ('a','b') >>> new_names = ('aa','bb') >>> t.rename_columns(names, new_names) >>> print(t) aa bb c --- --- --- 1 3 5 2 4 6 ''' if not self._is_list_or_tuple_of_str(names): raise TypeError("input 'names' must be a tuple or a list of column names") if not self._is_list_or_tuple_of_str(new_names): raise TypeError("input 'new_names' must be a tuple or a list of column names") if len(names) != len(new_names): raise ValueError("input 'names' and 'new_names' list arguments must be the same length") for name, new_name in zip(names, new_names): self.rename_column(name, new_name) def _set_row(self, idx, colnames, vals): try: assert len(vals) == len(colnames) except Exception: raise ValueError('right hand side must be a sequence of values with ' 'the same length as the number of selected columns') # Keep track of original values before setting each column so that # setting row can be transactional. 
orig_vals = [] cols = self.columns try: for name, val in zip(colnames, vals): orig_vals.append(cols[name][idx]) cols[name][idx] = val except Exception: # If anything went wrong first revert the row update then raise for name, val in zip(colnames, orig_vals[:-1]): cols[name][idx] = val raise def add_row(self, vals=None, mask=None): """Add a new row to the end of the table. The ``vals`` argument can be: sequence (e.g. tuple or list) Column values in the same order as table columns. mapping (e.g. dict) Keys corresponding to column names. Missing values will be filled with np.zeros for the column dtype. `None` All values filled with np.zeros for the column dtype. This method requires that the Table object "owns" the underlying array data. In particular one cannot add a row to a Table that was initialized with copy=False from an existing array. The ``mask`` attribute should give (if desired) the mask for the values. The type of the mask should match that of the values, i.e. if ``vals`` is an iterable, then ``mask`` should also be an iterable with the same length, and if ``vals`` is a mapping, then ``mask`` should be a dictionary. Parameters ---------- vals : tuple, list, dict or None Use the specified values in the new row mask : tuple, list, dict or None Use the specified mask values in the new row Examples -------- Create a table with three columns 'a', 'b' and 'c':: >>> t = Table([[1,2],[4,5],[7,8]], names=('a','b','c')) >>> print(t) a b c --- --- --- 1 4 7 2 5 8 Adding a new row with entries '3' in 'a', '6' in 'b' and '9' in 'c':: >>> t.add_row([3,6,9]) >>> print(t) a b c --- --- --- 1 4 7 2 5 8 3 6 9 """ self.insert_row(len(self), vals, mask) def insert_row(self, index, vals=None, mask=None): """Add a new row before the given ``index`` position in the table. The ``vals`` argument can be: sequence (e.g. tuple or list) Column values in the same order as table columns. mapping (e.g. dict) Keys corresponding to column names. Missing values will be filled with np.zeros for the column dtype. `None` All values filled with np.zeros for the column dtype. The ``mask`` attribute should give (if desired) the mask for the values. The type of the mask should match that of the values, i.e. if ``vals`` is an iterable, then ``mask`` should also be an iterable with the same length, and if ``vals`` is a mapping, then ``mask`` should be a dictionary. Parameters ---------- vals : tuple, list, dict or None Use the specified values in the new row mask : tuple, list, dict or None Use the specified mask values in the new row """ colnames = self.colnames N = len(self) if index < -N or index > N: raise IndexError("Index {} is out of bounds for table with length {}" .format(index, N)) if index < 0: index += N if isinstance(vals, Mapping) or vals is None: # From the vals and/or mask mappings create the corresponding lists # that have entries for each table column. if mask is not None and not isinstance(mask, Mapping): raise TypeError("Mismatch between type of vals and mask") # Now check that the mask is specified for the same keys as the # values, otherwise things get really confusing. 
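            # For example (hypothetical values), vals={'a': 1} with
            # mask={'b': True} is rejected below: a mask entry that has no
            # matching value entry would be ambiguous.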
if mask is not None and set(vals.keys()) != set(mask.keys()): raise ValueError('keys in mask should match keys in vals') if vals and any(name not in colnames for name in vals): raise ValueError('Keys in vals must all be valid column names') vals_list = [] mask_list = [] for name in colnames: if vals and name in vals: vals_list.append(vals[name]) mask_list.append(False if mask is None else mask[name]) else: col = self[name] if hasattr(col, 'dtype'): # Make a placeholder zero element of the right type which is masked. # This assumes the appropriate insert() method will broadcast a # numpy scalar to the right shape. vals_list.append(np.zeros(shape=(), dtype=col.dtype)) # For masked table any unsupplied values are masked by default. mask_list.append(self.masked and vals is not None) else: raise ValueError(f"Value must be supplied for column '{name}'") vals = vals_list mask = mask_list if isiterable(vals): if mask is not None and (not isiterable(mask) or isinstance(mask, Mapping)): raise TypeError("Mismatch between type of vals and mask") if len(self.columns) != len(vals): raise ValueError('Mismatch between number of vals and columns') if mask is not None: if len(self.columns) != len(mask): raise ValueError('Mismatch between number of masks and columns') else: mask = [False] * len(self.columns) else: raise TypeError('Vals must be an iterable or mapping or None') # Insert val at index for each column columns = self.TableColumns() for name, col, val, mask_ in zip(colnames, self.columns.values(), vals, mask): try: # If new val is masked and the existing column does not support masking # then upgrade the column to a mask-enabled type: either the table-level # default ColumnClass or else MaskedColumn. if mask_ and isinstance(col, Column) and not isinstance(col, MaskedColumn): col_cls = (self.ColumnClass if issubclass(self.ColumnClass, self.MaskedColumn) else self.MaskedColumn) col = col_cls(col, copy=False) newcol = col.insert(index, val, axis=0) if len(newcol) != N + 1: raise ValueError('Incorrect length for column {} after inserting {}' ' (expected {}, got {})' .format(name, val, len(newcol), N + 1)) newcol.info.parent_table = self # Set mask if needed and possible if mask_: if hasattr(newcol, 'mask'): newcol[index] = np.ma.masked else: raise TypeError("mask was supplied for column '{}' but it does not " "support masked values".format(col.info.name)) columns[name] = newcol except Exception as err: raise ValueError("Unable to insert row because of exception in column '{}':\n{}" .format(name, err)) from err for table_index in self.indices: table_index.insert_row(index, vals, self.columns.values()) self._replace_cols(columns) # Revert groups to default (ungrouped) state if hasattr(self, '_groups'): del self._groups def _replace_cols(self, columns): for col, new_col in zip(self.columns.values(), columns.values()): new_col.info.indices = [] for index in col.info.indices: index.columns[index.col_position(col.info.name)] = new_col new_col.info.indices.append(index) self.columns = columns def update(self, other, copy=True): """ Perform a dictionary-style update and merge metadata. The argument ``other`` must be a |Table|, or something that can be used to initialize a table. Columns from (possibly converted) ``other`` are added to this table. In case of matching column names the column from this table is replaced with the one from ``other``. Parameters ---------- other : table-like Data to update this table with. copy : bool Whether the updated columns should be copies of or references to the originals. 
See Also -------- add_columns, astropy.table.hstack, replace_column Examples -------- Update a table with another table:: >>> t1 = Table({'a': ['foo', 'bar'], 'b': [0., 0.]}, meta={'i': 0}) >>> t2 = Table({'b': [1., 2.], 'c': [7., 11.]}, meta={'n': 2}) >>> t1.update(t2) >>> t1 <Table length=2> a b c str3 float64 float64 ---- ------- ------- foo 1.0 7.0 bar 2.0 11.0 >>> t1.meta {'i': 0, 'n': 2} Update a table with a dictionary:: >>> t = Table({'a': ['foo', 'bar'], 'b': [0., 0.]}) >>> t.update({'b': [1., 2.]}) >>> t <Table length=2> a b str3 float64 ---- ------- foo 1.0 bar 2.0 """ from .operations import _merge_table_meta if not isinstance(other, Table): other = self.__class__(other, copy=copy) common_cols = set(self.colnames).intersection(other.colnames) for name, col in other.items(): if name in common_cols: self.replace_column(name, col, copy=copy) else: self.add_column(col, name=name, copy=copy) _merge_table_meta(self, [self, other], metadata_conflicts='silent') def argsort(self, keys=None, kind=None, reverse=False): """ Return the indices which would sort the table according to one or more key columns. This simply calls the `numpy.argsort` function on the table with the ``order`` parameter set to ``keys``. Parameters ---------- keys : str or list of str The column name(s) to order the table by kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional Sorting algorithm used by ``numpy.argsort``. reverse : bool Sort in reverse order (default=False) Returns ------- index_array : ndarray, int Array of indices that sorts the table by the specified key column(s). """ if isinstance(keys, str): keys = [keys] # use index sorted order if possible if keys is not None: index = get_index(self, names=keys) if index is not None: idx = np.asarray(index.sorted_data()) return idx[::-1] if reverse else idx kwargs = {} if keys: # For multiple keys return a structured array which gets sorted, # while for a single key return a single ndarray. Sorting a # one-column structured array is slower than ndarray (e.g. a # factor of ~6 for a 10 million long random array), and much slower # for in principle sortable columns like Time, which get stored as # object arrays. if len(keys) > 1: kwargs['order'] = keys data = self.as_array(names=keys) else: data = self[keys[0]] else: # No keys provided so sort on all columns. data = self.as_array() if kind: kwargs['kind'] = kind # np.argsort will look for a possible .argsort method (e.g., for Time), # and if that fails cast to an array and try sorting that way. idx = np.argsort(data, **kwargs) return idx[::-1] if reverse else idx def sort(self, keys=None, *, kind=None, reverse=False): ''' Sort the table according to one or more keys. This operates on the existing table and does not return a new table. Parameters ---------- keys : str or list of str The key(s) to order the table by. If None, use the primary index of the Table. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional Sorting algorithm used by ``numpy.argsort``. reverse : bool Sort in reverse order (default=False) Examples -------- Create a table with 3 columns:: >>> t = Table([['Max', 'Jo', 'John'], ['Miller', 'Miller', 'Jackson'], ... 
[12, 15, 18]], names=('firstname', 'name', 'tel')) >>> print(t) firstname name tel --------- ------- --- Max Miller 12 Jo Miller 15 John Jackson 18 Sorting according to standard sorting rules, first 'name' then 'firstname':: >>> t.sort(['name', 'firstname']) >>> print(t) firstname name tel --------- ------- --- John Jackson 18 Jo Miller 15 Max Miller 12 Sorting according to standard sorting rules, first 'firstname' then 'tel', in reverse order:: >>> t.sort(['firstname', 'tel'], reverse=True) >>> print(t) firstname name tel --------- ------- --- Max Miller 12 John Jackson 18 Jo Miller 15 ''' if keys is None: if not self.indices: raise ValueError("Table sort requires input keys or a table index") keys = [x.info.name for x in self.indices[0].columns] if isinstance(keys, str): keys = [keys] indexes = self.argsort(keys, kind=kind, reverse=reverse) with self.index_mode('freeze'): for name, col in self.columns.items(): # Make a new sorted column. This requires that take() also copies # relevant info attributes for mixin columns. new_col = col.take(indexes, axis=0) # First statement in try: will succeed if the column supports an in-place # update, and matches the legacy behavior of astropy Table. However, # some mixin classes may not support this, so in that case just drop # in the entire new column. See #9553 and #9536 for discussion. try: col[:] = new_col except Exception: # In-place update failed for some reason, exception class not # predictable for arbitrary mixin. self[col.info.name] = new_col def reverse(self): ''' Reverse the row order of table rows. The table is reversed in place and there are no function arguments. Examples -------- Create a table with three columns:: >>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'], ... [12,15,18]], names=('firstname','name','tel')) >>> print(t) firstname name tel --------- ------- --- Max Miller 12 Jo Miller 15 John Jackson 18 Reversing order:: >>> t.reverse() >>> print(t) firstname name tel --------- ------- --- John Jackson 18 Jo Miller 15 Max Miller 12 ''' for col in self.columns.values(): # First statement in try: will succeed if the column supports an in-place # update, and matches the legacy behavior of astropy Table. However, # some mixin classes may not support this, so in that case just drop # in the entire new column. See #9836, #9553, and #9536 for discussion. new_col = col[::-1] try: col[:] = new_col except Exception: # In-place update failed for some reason, exception class not # predictable for arbitrary mixin. self[col.info.name] = new_col for index in self.indices: index.reverse() def round(self, decimals=0): ''' Round numeric columns in-place to the specified number of decimals. Non-numeric columns will be ignored. Examples -------- Create three columns with different types: >>> t = Table([[1, 4, 5], [-25.55, 12.123, 85], ... ['a', 'b', 'c']], names=('a', 'b', 'c')) >>> print(t) a b c --- ------ --- 1 -25.55 a 4 12.123 b 5 85.0 c Round them all to 0: >>> t.round(0) >>> print(t) a b c --- ----- --- 1 -26.0 a 4 12.0 b 5 85.0 c Round column 'a' to -1 decimal: >>> t.round({'a':-1}) >>> print(t) a b c --- ----- --- 0 -26.0 a 0 12.0 b 0 85.0 c Parameters ---------- decimals: int, dict Number of decimals to round the columns to. If a dict is given, the columns will be rounded to the number specified as the value. If a certain column is not in the dict given, it will remain the same. 
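
        Notes
        -----
        Rounding is attempted in-place with ``np.around(col, out=col)`` and
        falls back to element-wise assignment for dtypes where the in-place
        call raises a ``TypeError`` (a numpy issue referenced in the
        implementation).  Only columns whose dtype passes
        ``np.issubdtype(col.info.dtype, np.number)`` are touched.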
        '''
        if isinstance(decimals, Mapping):
            decimal_values = decimals.values()
            column_names = decimals.keys()
        elif isinstance(decimals, int):
            decimal_values = itertools.repeat(decimals)
            column_names = self.colnames
        else:
            raise ValueError("'decimals' argument must be an int or a dict")

        for colname, decimal in zip(column_names, decimal_values):
            col = self.columns[colname]
            if np.issubdtype(col.info.dtype, np.number):
                try:
                    np.around(col, decimals=decimal, out=col)
                except TypeError:
                    # Bug in numpy see https://github.com/numpy/numpy/issues/15438
                    col[()] = np.around(col, decimals=decimal)

    def copy(self, copy_data=True):
        '''
        Return a copy of the table.

        Parameters
        ----------
        copy_data : bool
            If `True` (the default), copy the underlying data array.
            Otherwise, use the same data array. The ``meta`` is always
            deepcopied regardless of the value for ``copy_data``.
        '''
        out = self.__class__(self, copy=copy_data)

        # If the current table is grouped then do the same in the copy
        if hasattr(self, '_groups'):
            out._groups = groups.TableGroups(out, indices=self._groups._indices,
                                             keys=self._groups._keys)

        return out

    def __deepcopy__(self, memo=None):
        return self.copy(True)

    def __copy__(self):
        return self.copy(False)

    def __lt__(self, other):
        return super().__lt__(other)

    def __gt__(self, other):
        return super().__gt__(other)

    def __le__(self, other):
        return super().__le__(other)

    def __ge__(self, other):
        return super().__ge__(other)

    def __eq__(self, other):
        return self._rows_equal(other)

    def __ne__(self, other):
        return ~self.__eq__(other)

    def _rows_equal(self, other):
        """
        Row-wise comparison of table with any other object.

        This is the actual implementation for __eq__.

        Returns a 1-D boolean numpy array showing the result of the row-wise
        comparison.  This is the same as the ``==`` comparison for tables.

        Parameters
        ----------
        other : Table or DataFrame or ndarray
            An object to compare with table

        Examples
        --------
        Comparing one Table with another::

            >>> t1 = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
            >>> t2 = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
            >>> t1._rows_equal(t2)
            array([ True,  True])

        """
        if isinstance(other, Table):
            other = other.as_array()

        if self.has_masked_columns:
            if isinstance(other, np.ma.MaskedArray):
                result = self.as_array() == other
            else:
                # If mask is True, then by definition the row doesn't match
                # because the other array is not masked.
                false_mask = np.zeros(1, dtype=[(n, bool) for n in self.dtype.names])
                result = (self.as_array().data == other) & (self.mask == false_mask)
        else:
            if isinstance(other, np.ma.MaskedArray):
                # If mask is True, then by definition the row doesn't match
                # because the other array is not masked.
                false_mask = np.zeros(1, dtype=[(n, bool) for n in other.dtype.names])
                result = (self.as_array() == other.data) & (other.mask == false_mask)
            else:
                result = self.as_array() == other

        return result

    def values_equal(self, other):
        """
        Element-wise comparison of table with another table, list, or scalar.

        Returns a ``Table`` with the same columns containing boolean values
        showing the result of the comparison.

        Parameters
        ----------
        other : table-like object or list or scalar
            Object to compare with table

        Examples
        --------
        Compare one Table with another::

            >>> t1 = Table([[1, 2], [4, 5], [-7, 8]], names=('a', 'b', 'c'))
            >>> t2 = Table([[1, 2], [-4, 5], [7, 8]], names=('a', 'b', 'c'))
            >>> t1.values_equal(t2)
            <Table length=2>
             a     b     c
            bool  bool  bool
            ---- ----- -----
            True False False
            True  True  True

        """
        if isinstance(other, Table):
            names = other.colnames
        else:
            try:
                other = Table(other, copy=False)
                names = other.colnames
            except Exception:
                # Broadcast other into a dict, so e.g. other = 2 will turn into
                # other = {'a': 2, 'b': 2} and then equality does a
                # column-by-column broadcasting.
                names = self.colnames
                other = {name: other for name in names}

        # Require column names match but do not require same column order
        if set(self.colnames) != set(names):
            raise ValueError('cannot compare tables with different column names')

        eqs = []
        for name in names:
            try:
                np.broadcast(self[name], other[name])  # Check if broadcast-able
                # Catch the numpy FutureWarning related to equality checking,
                # "elementwise comparison failed; returning scalar instead, but
                # in the future will perform elementwise comparison".  Turn this
                # into an exception since the scalar answer is not what we want.
                with warnings.catch_warnings(record=True) as warns:
                    warnings.simplefilter('always')
                    eq = self[name] == other[name]
                    if (warns and issubclass(warns[-1].category, FutureWarning)
                            and 'elementwise comparison failed' in str(warns[-1].message)):
                        raise FutureWarning(warns[-1].message)
            except Exception as err:
                raise ValueError(f'unable to compare column {name}') from err

            # Be strict about the result from the comparison. E.g. SkyCoord __eq__ is just
            # broken and completely ignores that it should return an array.
            if not (isinstance(eq, np.ndarray)
                    and eq.dtype is np.dtype('bool')
                    and len(eq) == len(self)):
                raise TypeError(f'comparison for column {name} returned {eq} '
                                f'instead of the expected boolean ndarray')

            eqs.append(eq)

        out = Table(eqs, names=names)

        return out

    @property
    def groups(self):
        if not hasattr(self, '_groups'):
            self._groups = groups.TableGroups(self)

        return self._groups

    def group_by(self, keys):
        """
        Group this table by the specified ``keys``

        This effectively splits the table into groups which correspond to
        unique values of the ``keys`` grouping object.  The output is a new
        `~astropy.table.TableGroups` which contains a copy of this table but
        sorted by row according to ``keys``.

        The ``keys`` input to `group_by` can be specified in different ways:

          - String or list of strings corresponding to table column name(s)
          - Numpy array (homogeneous or structured) with same length as this table
          - `~astropy.table.Table` with same length as this table

        Parameters
        ----------
        keys : str, list of str, numpy array, or `~astropy.table.Table`
            Key grouping object

        Returns
        -------
        out : `~astropy.table.Table`
            New table with groups set
        """
        return groups.table_group_by(self, keys)

    def to_pandas(self, index=None, use_nullable_int=True):
        """
        Return a :class:`pandas.DataFrame` instance

        The index of the created DataFrame is controlled by the ``index``
        argument.  For ``index=True`` or the default ``None``, an index will be
        specified for the DataFrame if there is a primary key index on the
        Table *and* if it corresponds to a single column.  If ``index=False``
        then no DataFrame index will be specified.  If ``index`` is the name of
        a column in the table then that will be the DataFrame index.

        In addition to vanilla columns or masked columns, this supports Table
        mixin columns like Quantity, Time, or SkyCoord.  In many cases these
        objects have no analog in pandas and will be converted to an "encoded"
        representation using only Column or MaskedColumn.  The exception is
        Time or TimeDelta columns, which will be converted to the corresponding
        representation in pandas using ``np.datetime64`` or ``np.timedelta64``.
        See the example below.

        Parameters
        ----------
        index : None, bool, str
            Specify DataFrame index mode
        use_nullable_int : bool, default=True
            Convert integer MaskedColumn to pandas nullable integer type.  If
            ``use_nullable_int=False`` or the pandas version does not support
            nullable integer types (version < 0.24), then the column is
            converted to float with NaN for missing elements and a warning is
            issued.

        Returns
        -------
        dataframe : :class:`pandas.DataFrame`
            A pandas :class:`pandas.DataFrame` instance

        Raises
        ------
        ImportError
            If pandas is not installed
        ValueError
            If the Table has multi-dimensional columns

        Examples
        --------
        Here we convert a table with a few mixins to a
        :class:`pandas.DataFrame` instance.

        >>> import pandas as pd
        >>> from astropy.table import QTable
        >>> import astropy.units as u
        >>> from astropy.time import Time, TimeDelta
        >>> from astropy.coordinates import SkyCoord

        >>> q = [1, 2] * u.m
        >>> tm = Time([1998, 2002], format='jyear')
        >>> sc = SkyCoord([5, 6], [7, 8], unit='deg')
        >>> dt = TimeDelta([3, 200] * u.s)
        >>> t = QTable([q, tm, sc, dt], names=['q', 'tm', 'sc', 'dt'])

        >>> df = t.to_pandas(index='tm')
        >>> with pd.option_context('display.max_columns', 20):
        ...     print(df)
                      q  sc.ra  sc.dec              dt
        tm
        1998-01-01  1.0    5.0     7.0 0 days 00:00:03
        2002-01-01  2.0    6.0     8.0 0 days 00:03:20

        """
        from pandas import DataFrame, Series

        if index is not False:
            if index in (None, True):
                # Default is to use the table primary key if available and a single column
                if self.primary_key and len(self.primary_key) == 1:
                    index = self.primary_key[0]
                else:
                    index = False
            else:
                if index not in self.colnames:
                    raise ValueError('index must be None, False, True or a table '
                                     'column name')

        def _encode_mixins(tbl):
            """Encode a Table ``tbl`` that may have mixin columns to a Table with only
            astropy Columns + appropriate meta-data to allow subsequent decoding.
            """
            from . import serialize
            from astropy.time import TimeBase, TimeDelta

            # Convert any Time or TimeDelta columns and pay attention to masking
            time_cols = [col for col in tbl.itercols() if isinstance(col, TimeBase)]
            if time_cols:

                # Make a light copy of table and clear any indices
                new_cols = []
                for col in tbl.itercols():
                    new_col = col_copy(col, copy_indices=False) if col.info.indices else col
                    new_cols.append(new_col)
                tbl = tbl.__class__(new_cols, copy=False)

                # Certain subclasses (e.g. TimeSeries) may generate new indices on
                # table creation, so make sure there are no indices on the table.
                for col in tbl.itercols():
                    col.info.indices.clear()

                for col in time_cols:
                    if isinstance(col, TimeDelta):
                        # Convert to nanoseconds (matches astropy datetime64 support)
                        new_col = (col.sec * 1e9).astype('timedelta64[ns]')
                        nat = np.timedelta64('NaT')
                    else:
                        new_col = col.datetime64.copy()
                        nat = np.datetime64('NaT')
                    if col.masked:
                        new_col[col.mask] = nat
                    tbl[col.info.name] = new_col

            # Convert the table to one with no mixins, only Column objects.
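            # For instance, the SkyCoord column ``sc`` from the docstring
            # example above comes back from this call as plain columns
            # ``sc.ra`` and ``sc.dec``.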
            encode_tbl = serialize.represent_mixins_as_columns(tbl)
            return encode_tbl

        tbl = _encode_mixins(self)

        badcols = [name for name, col in self.columns.items() if len(col.shape) > 1]
        if badcols:
            raise ValueError(
                f'Cannot convert a table with multidimensional columns to a '
                f'pandas DataFrame. Offending columns are: {badcols}\n'
                f'One can filter out such columns using:\n'
                f'names = [name for name in tbl.colnames if len(tbl[name].shape) <= 1]\n'
                f'tbl[names].to_pandas(...)')

        out = OrderedDict()

        for name, column in tbl.columns.items():
            if getattr(column.dtype, 'isnative', True):
                out[name] = column
            else:
                out[name] = column.data.byteswap().newbyteorder('=')

            if isinstance(column, MaskedColumn) and np.any(column.mask):
                if column.dtype.kind in ['i', 'u']:
                    pd_dtype = column.dtype.name
                    if use_nullable_int:
                        # Convert int64 to Int64, uint32 to UInt32, etc for nullable types
                        pd_dtype = pd_dtype.replace('i', 'I').replace('u', 'U')
                    out[name] = Series(out[name], dtype=pd_dtype)

                    # If pandas is older than 0.24 the type may have turned to float
                    if column.dtype.kind != out[name].dtype.kind:
                        warnings.warn(
                            f"converted column '{name}' from {column.dtype} to {out[name].dtype}",
                            TableReplaceWarning, stacklevel=3)
                elif column.dtype.kind not in ['f', 'c']:
                    out[name] = column.astype(object).filled(np.nan)

        kwargs = {}

        if index:
            idx = out.pop(index)

            kwargs['index'] = idx

            # We add the table index to Series inputs (MaskedColumn with int values) to override
            # its default RangeIndex, see #11432
            for v in out.values():
                if isinstance(v, Series):
                    v.index = idx

        df = DataFrame(out, **kwargs)
        if index:
            # Explicitly set the pandas DataFrame index to the original table
            # index name.
            df.index.name = idx.info.name

        return df

    @classmethod
    def from_pandas(cls, dataframe, index=False, units=None):
        """
        Create a `~astropy.table.Table` from a :class:`pandas.DataFrame` instance

        In addition to converting generic numeric or string columns, this supports
        conversion of pandas Date and Time delta columns to `~astropy.time.Time`
        and `~astropy.time.TimeDelta` columns, respectively.

        Parameters
        ----------
        dataframe : :class:`pandas.DataFrame`
            A pandas :class:`pandas.DataFrame` instance
        index : bool
            Include the index column in the returned table (default=False)
        units : dict
            A dict mapping column names to a `~astropy.units.Unit`.
            The columns will have the specified unit in the Table.

        Returns
        -------
        table : `~astropy.table.Table`
            A `~astropy.table.Table` (or subclass) instance

        Raises
        ------
        ImportError
            If pandas is not installed

        Examples
        --------
        Here we convert a :class:`pandas.DataFrame` instance
        to a `~astropy.table.QTable`.

        >>> import numpy as np
        >>> import pandas as pd
        >>> from astropy.table import QTable

        >>> time = pd.Series(['1998-01-01', '2002-01-01'], dtype='datetime64[ns]')
        >>> dt = pd.Series(np.array([1, 300], dtype='timedelta64[s]'))
        >>> df = pd.DataFrame({'time': time})
        >>> df['dt'] = dt
        >>> df['x'] = [3., 4.]
        >>> with pd.option_context('display.max_columns', 20):
        ...     print(df)
                time              dt    x
        0 1998-01-01 0 days 00:00:01  3.0
        1 2002-01-01 0 days 00:05:00  4.0

        >>> QTable.from_pandas(df)
        <QTable length=2>
                  time            dt      x
                  Time        TimeDelta float64
        ----------------------- --------- -------
        1998-01-01T00:00:00.000       1.0     3.0
        2002-01-01T00:00:00.000     300.0     4.0

        """
        out = OrderedDict()

        names = list(dataframe.columns)
        columns = [dataframe[name] for name in names]
        datas = [np.array(column) for column in columns]
        masks = [np.array(column.isnull()) for column in columns]

        if index:
            index_name = dataframe.index.name or 'index'
            while index_name in names:
                index_name = '_' + index_name + '_'
            names.insert(0, index_name)
            columns.insert(0, dataframe.index)
            datas.insert(0, np.array(dataframe.index))
            masks.insert(0, np.zeros(len(dataframe), dtype=bool))

        if units is None:
            units = [None] * len(names)
        else:
            if not isinstance(units, Mapping):
                raise TypeError('Expected a Mapping "column-name" -> "unit"')

            not_found = set(units.keys()) - set(names)
            if not_found:
                warnings.warn(f'`units` contains additional columns: {not_found}')

            units = [units.get(name) for name in names]

        for name, column, data, mask, unit in zip(names, columns, datas, masks, units):

            if column.dtype.kind in ['u', 'i'] and np.any(mask):
                # Special-case support for pandas nullable int
                np_dtype = str(column.dtype).lower()
                data = np.zeros(shape=column.shape, dtype=np_dtype)
                data[~mask] = column[~mask]
                out[name] = MaskedColumn(data=data, name=name, mask=mask, unit=unit,
                                         copy=False)
                continue

            if data.dtype.kind == 'O':
                # If all elements of an object array are string-like or np.nan
                # then coerce back to a native numpy str/unicode array.
                string_types = (str, bytes)
                nan = np.nan
                if all(isinstance(x, string_types) or x is nan for x in data):
                    # Force any missing (null) values to b''.  Numpy will
                    # upcast to str/unicode as needed.
                    data[mask] = b''

                    # When the numpy object array is represented as a list then
                    # numpy initializes to the correct string or unicode type.
                    data = np.array([x for x in data])

            # Numpy datetime64
            if data.dtype.kind == 'M':
                from astropy.time import Time
                out[name] = Time(data, format='datetime64')
                if np.any(mask):
                    out[name][mask] = np.ma.masked
                out[name].format = 'isot'

            # Numpy timedelta64
            elif data.dtype.kind == 'm':
                from astropy.time import TimeDelta
                data_sec = data.astype('timedelta64[ns]').astype(np.float64) / 1e9
                out[name] = TimeDelta(data_sec, format='sec')
                if np.any(mask):
                    out[name][mask] = np.ma.masked

            else:
                if np.any(mask):
                    out[name] = MaskedColumn(data=data, name=name, mask=mask, unit=unit)
                else:
                    out[name] = Column(data=data, name=name, unit=unit)

        return cls(out)

    info = TableInfo()


class QTable(Table):
    """A class to represent tables of heterogeneous data.

    `~astropy.table.QTable` provides a class for heterogeneous tabular data
    which can be easily modified, for instance adding columns or new rows.

    The `~astropy.table.QTable` class is identical to `~astropy.table.Table`
    except that columns with an associated ``unit`` attribute are converted to
    `~astropy.units.Quantity` objects.

    See also:

    - https://docs.astropy.org/en/stable/table/
    - https://docs.astropy.org/en/stable/table/mixin_columns.html

    Parameters
    ----------
    data : numpy ndarray, dict, list, table-like object, optional
        Data to initialize table.
    masked : bool, optional
        Specify whether the table is masked.
    names : list, optional
        Specify column names.
    dtype : list, optional
        Specify column data types.
    meta : dict, optional
        Metadata associated with the table.
    copy : bool, optional
        Copy the input data. Default is True.
rows : numpy ndarray, list of list, optional Row-oriented data for table instead of ``data`` argument. copy_indices : bool, optional Copy any indices in the input data. Default is True. **kwargs : dict, optional Additional keyword args when converting table-like object. """ def _is_mixin_for_table(self, col): """ Determine if ``col`` should be added to the table directly as a mixin column. """ return has_info_class(col, MixinInfo) def _convert_col_for_table(self, col): if isinstance(col, Column) and getattr(col, 'unit', None) is not None: # We need to turn the column into a quantity; use subok=True to allow # Quantity subclasses identified in the unit (such as u.mag()). q_cls = Masked(Quantity) if isinstance(col, MaskedColumn) else Quantity try: qcol = q_cls(col.data, col.unit, copy=False, subok=True) except Exception as exc: warnings.warn(f"column {col.info.name} has a unit but is kept as " f"a {col.__class__.__name__} as an attempt to " f"convert it to Quantity failed with:\n{exc!r}", AstropyUserWarning) else: qcol.info = col.info qcol.info.indices = col.info.indices col = qcol else: col = super()._convert_col_for_table(col) return col
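
# A minimal usage sketch of the QTable column conversion above (illustrative
# only, not part of the module API):
#
#     >>> import astropy.units as u
#     >>> from astropy.table import QTable
#     >>> t = QTable([[1.0, 2.0] * u.m], names=['d'])
#     >>> type(t['d']).__name__  # unit-bearing columns become Quantity
#     'Quantity'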
bf74019a96521e5383d769dd9ba473541dabed8d565a9be7feac87042514dac1
# Licensed under a 3-clause BSD style license - see LICENSE.rst import itertools import warnings import weakref from copy import deepcopy import numpy as np from numpy import ma from astropy.units import Unit, Quantity, StructuredUnit from astropy.utils.console import color_print from astropy.utils.metadata import MetaData from astropy.utils.data_info import BaseColumnInfo, dtype_info_name from astropy.utils.misc import dtype_bytes_or_chars from . import groups from . import pprint # These "shims" provide __getitem__ implementations for Column and MaskedColumn from ._column_mixins import _ColumnGetitemShim, _MaskedColumnGetitemShim # Create a generic TableFormatter object for use by bare columns with no # parent table. FORMATTER = pprint.TableFormatter() class StringTruncateWarning(UserWarning): """ Warning class for when a string column is assigned a value that gets truncated because the base (numpy) string length is too short. This does not inherit from AstropyWarning because we want to use stacklevel=2 to show the user where the issue occurred in their code. """ pass # Always emit this warning, not just the first instance warnings.simplefilter('always', StringTruncateWarning) def _auto_names(n_cols): from . import conf return [str(conf.auto_colname).format(i) for i in range(n_cols)] # list of one and two-dimensional comparison functions, which sometimes return # a Column class and sometimes a plain array. Used in __array_wrap__ to ensure # they only return plain (masked) arrays (see #1446 and #1685) _comparison_functions = set( [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal, np.equal, np.isfinite, np.isinf, np.isnan, np.sign, np.signbit]) def col_copy(col, copy_indices=True): """ Mixin-safe version of Column.copy() (with copy_data=True). Parameters ---------- col : Column or mixin column Input column copy_indices : bool Copy the column ``indices`` attribute Returns ------- col : Copy of input column """ if isinstance(col, BaseColumn): return col.copy() newcol = col.copy() if hasattr(col, 'copy') else deepcopy(col) # If the column has info defined, we copy it and adjust any indices # to point to the copied column. By guarding with the if statement, # we avoid side effects (of creating the default info instance). if 'info' in col.__dict__: newcol.info = col.info if copy_indices and col.info.indices: newcol.info.indices = deepcopy(col.info.indices) for index in newcol.info.indices: index.replace_col(col, newcol) return newcol class FalseArray(np.ndarray): """ Boolean mask array that is always False. This is used to create a stub ``mask`` property which is a boolean array of ``False`` used by default for mixin columns and corresponding to the mixin column data shape. The ``mask`` looks like a normal numpy array but an exception will be raised if ``True`` is assigned to any element. The consequences of the limitation are most obvious in the high-level table operations. Parameters ---------- shape : tuple Data shape """ def __new__(cls, shape): obj = np.zeros(shape, dtype=bool).view(cls) return obj def __setitem__(self, item, val): val = np.asarray(val) if np.any(val): raise ValueError('Cannot set any element of {} class to True' .format(self.__class__.__name__)) def _expand_string_array_for_values(arr, values): """ For string-dtype return a version of ``arr`` that is wide enough for ``values``. If ``arr`` is not string-dtype or does not need expansion then return ``arr``. 
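    For example (illustrative), assigning the value ``'abcd'`` into a ``U2``
    array returns a widened ``U4`` copy of the array.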
Parameters ---------- arr : np.ndarray Input array values : scalar or array-like Values for width comparison for string arrays Returns ------- arr_expanded : np.ndarray """ if arr.dtype.kind in ('U', 'S') and values is not np.ma.masked: # Find the length of the longest string in the new values. values_str_len = np.char.str_len(values).max() # Determine character repeat count of arr.dtype. Returns a positive # int or None (something like 'U0' is not possible in numpy). If new values # are longer than current then make a new (wider) version of arr. arr_str_len = dtype_bytes_or_chars(arr.dtype) if arr_str_len and values_str_len > arr_str_len: arr_dtype = arr.dtype.byteorder + arr.dtype.kind + str(values_str_len) arr = arr.astype(arr_dtype) return arr def _convert_sequence_data_to_array(data, dtype=None): """Convert N-d sequence-like data to ndarray or MaskedArray. This is the core function for converting Python lists or list of lists to a numpy array. This handles embedded np.ma.masked constants in ``data`` along with the special case of an homogeneous list of MaskedArray elements. Considerations: - np.ma.array is about 50 times slower than np.array for list input. This function avoids using np.ma.array on list input. - np.array emits a UserWarning for embedded np.ma.masked, but only for int or float inputs. For those it converts to np.nan and forces float dtype. For other types np.array is inconsistent, for instance converting np.ma.masked to "0.0" for str types. - Searching in pure Python for np.ma.masked in ``data`` is comparable in speed to calling ``np.array(data)``. - This function may end up making two additional copies of input ``data``. Parameters ---------- data : N-d sequence Input data, typically list or list of lists dtype : None or dtype-like Output datatype (None lets np.array choose) Returns ------- np_data : np.ndarray or np.ma.MaskedArray """ np_ma_masked = np.ma.masked # Avoid repeated lookups of this object # Special case of an homogeneous list of MaskedArray elements (see #8977). # np.ma.masked is an instance of MaskedArray, so exclude those values. if (hasattr(data, '__len__') and len(data) > 0 and all(isinstance(val, np.ma.MaskedArray) and val is not np_ma_masked for val in data)): np_data = np.ma.array(data, dtype=dtype) return np_data # First convert data to a plain ndarray. If there are instances of np.ma.masked # in the data this will issue a warning for int and float. with warnings.catch_warnings(record=True) as warns: # Ensure this warning from numpy is always enabled and that it is not # converted to an error (which can happen during pytest). warnings.filterwarnings('always', category=UserWarning, message='.*converting a masked element.*') # FutureWarning in numpy 1.21. See https://github.com/astropy/astropy/issues/11291 # and https://github.com/numpy/numpy/issues/18425. warnings.filterwarnings('always', category=FutureWarning, message='.*Promotion of numbers and bools to strings.*') try: np_data = np.array(data, dtype=dtype) except np.ma.MaskError: # Catches case of dtype=int with masked values, instead let it # convert to float np_data = np.array(data) except Exception: # Conversion failed for some reason, e.g. [2, 1*u.m] gives TypeError in Quantity. # First try to interpret the data as Quantity. 
If that still fails then fall # through to object try: np_data = Quantity(data, dtype) except Exception: dtype = object np_data = np.array(data, dtype=dtype) if np_data.ndim == 0 or (np_data.ndim > 0 and len(np_data) == 0): # Implies input was a scalar or an empty list (e.g. initializing an # empty table with pre-declared names and dtypes but no data). Here we # need to fall through to initializing with the original data=[]. return data # If there were no warnings and the data are int or float, then we are done. # Other dtypes like string or complex can have masked values and the # np.array() conversion gives the wrong answer (e.g. converting np.ma.masked # to the string "0.0"). if len(warns) == 0 and np_data.dtype.kind in ('i', 'f'): return np_data # Now we need to determine if there is an np.ma.masked anywhere in input data. # Make a statement like below to look for np.ma.masked in a nested sequence. # Because np.array(data) succeeded we know that `data` has a regular N-d # structure. Find ma_masked: # any(any(any(d2 is ma_masked for d2 in d1) for d1 in d0) for d0 in data) # Using this eval avoids creating a copy of `data` in the more-usual case of # no masked elements. any_statement = 'd0 is ma_masked' for ii in reversed(range(np_data.ndim)): if ii == 0: any_statement = f'any({any_statement} for d0 in data)' elif ii == np_data.ndim - 1: any_statement = f'any(d{ii} is ma_masked for d{ii} in d{ii-1})' else: any_statement = f'any({any_statement} for d{ii} in d{ii-1})' context = {'ma_masked': np.ma.masked, 'data': data} has_masked = eval(any_statement, context) # If there are any masks then explicitly change each one to a fill value and # set a mask boolean array. If not has_masked then we're done. if has_masked: mask = np.zeros(np_data.shape, dtype=bool) data_filled = np.array(data, dtype=object) # Make type-appropriate fill value based on initial conversion. if np_data.dtype.kind == 'U': fill = '' elif np_data.dtype.kind == 'S': fill = b'' else: # Zero works for every numeric type. fill = 0 ranges = [range(dim) for dim in np_data.shape] for idxs in itertools.product(*ranges): val = data_filled[idxs] if val is np_ma_masked: data_filled[idxs] = fill mask[idxs] = True elif isinstance(val, bool) and dtype is None: # If we see a bool and dtype not specified then assume bool for # the entire array. Not perfect but in most practical cases OK. # Unfortunately numpy types [False, 0] as int, not bool (and # [False, np.ma.masked] => array([0.0, np.nan])). dtype = bool # If no dtype is provided then need to convert back to list so np.array # does type autodetection. if dtype is None: data_filled = data_filled.tolist() # Use np.array first to convert `data` to ndarray (fast) and then make # masked array from an ndarray with mask (fast) instead of from `data`. np_data = np.ma.array(np.array(data_filled, dtype=dtype), mask=mask) return np_data def _make_compare(oper): """ Make Column comparison methods which encode the ``other`` object to utf-8 in the case of a bytestring dtype for Py3+. Parameters ---------- oper : str Operator name """ swapped_oper = {'__eq__': '__eq__', '__ne__': '__ne__', '__gt__': '__lt__', '__lt__': '__gt__', '__ge__': '__le__', '__le__': '__ge__'}[oper] def _compare(self, other): op = oper # copy enclosed ref to allow swap below # Special case to work around #6838. Other combinations work OK, # see tests.test_column.test_unicode_sandwich_compare(). In this # case just swap self and other. # # This is related to an issue in numpy that was addressed in np 1.13. 
        # However that fix does not make this problem go away, but maybe
        # future numpy versions will do so.  NUMPY_LT_1_13 to get the
        # attention of future maintainers to check (by deleting or versioning
        # the if block below).  See #6899 discussion.
        # 2019-06-21: still needed with numpy 1.16.
        if (isinstance(self, MaskedColumn) and self.dtype.kind == 'U'
                and isinstance(other, MaskedColumn) and other.dtype.kind == 'S'):
            self, other = other, self
            op = swapped_oper

        if self.dtype.char == 'S':
            other = self._encode_str(other)

        # Now just let the regular ndarray.__eq__, etc., take over.
        result = getattr(super(Column, self), op)(other)

        # But we should not return Column instances for this case.
        return result.data if isinstance(result, Column) else result

    return _compare


class ColumnInfo(BaseColumnInfo):
    """
    Container for meta information like name, description, format.

    This is required when the object is used as a mixin column within a table,
    but can be used as a general way to store meta information.
    """
    attr_names = BaseColumnInfo.attr_names | {'groups'}
    _attrs_no_copy = BaseColumnInfo._attrs_no_copy | {'groups'}
    attrs_from_parent = attr_names
    _supports_indexing = True
    # For structured columns, data is used to store a dict of columns.
    # Store entries in that dict as name.key instead of name.data.key.
    _represent_as_dict_primary_data = 'data'

    def _represent_as_dict(self):
        result = super()._represent_as_dict()
        names = self._parent.dtype.names
        # For a regular column, we are done, but for a structured
        # column, we use a SerializedColumn to store the pieces.
        if names is None:
            return result

        from .serialize import SerializedColumn

        data = SerializedColumn()
        # If this column has a StructuredUnit, we split it and store
        # it on the corresponding part.  Otherwise, we just store it
        # as an attribute below.  All other attributes we remove from
        # the parts, so that we do not store them multiple times.
        # (Note that attributes are not linked to the parent, so it
        # is safe to reset them.)
        # TODO: deal with (some of) this in Column.__getitem__?
        # Alternatively: should we store info on the first part?
        # TODO: special-case format somehow? Can we have good formats
        # for structured columns?
        unit = self.unit
        if isinstance(unit, StructuredUnit) and len(unit) == len(names):
            units = unit.values()
            unit = None  # No need to store as an attribute as well.
        else:
            units = [None] * len(names)
        for name, part_unit in zip(names, units):
            part = self._parent[name]
            part.unit = part_unit
            part.description = None
            part.meta = {}
            part.format = None
            data[name] = part

        # Create the attributes required to reconstruct the column.
        result['data'] = data
        # Store the shape if needed.  Just like scalar data, a structured data
        # column (e.g. with dtype `f8,i8`) can be multidimensional within each
        # row and have a shape, and that needs to be distinguished from the
        # case that each entry in the structure has the same shape (e.g.,
        # distinguish a column with dtype='f8,i8' and 2 elements per row from
        # one with dtype '2f8,2i8' and just one element per row).
        if shape := self._parent.shape[1:]:
            result['shape'] = list(shape)
        # Also store the standard info attributes since these are
        # stored on the parent and can thus just be passed on as
        # arguments.  TODO: factor out with essentially the same
        # code in serialize._represent_mixin_as_column.
if unit is not None and unit != '': result['unit'] = unit if self.format is not None: result['format'] = self.format if self.description is not None: result['description'] = self.description if self.meta: result['meta'] = self.meta return result def _construct_from_dict(self, map): if not isinstance(map.get('data'), dict): return super()._construct_from_dict(map) # Reconstruct a structured Column, by first making an empty column # and then filling it with the structured data. data = map.pop('data') shape = tuple(map.pop('shape', ())) # There are three elements in the shape of `part`: # (table length, shape of structured column, shape of part like '3f8') # The column `shape` only includes the second, so by adding one to its # length to include the table length, we pick off a possible last bit. dtype = np.dtype([(name, part.dtype, part.shape[len(shape)+1:]) for name, part in data.items()]) units = tuple(col.info.unit for col in data.values()) if all(unit is not None for unit in units): map['unit'] = StructuredUnit(units, dtype) map.update(dtype=dtype, shape=shape, length=len(data[dtype.names[0]])) # Construct the empty column from `map` (note: 'data' removed above). result = super()._construct_from_dict(map) # Fill it with the structured data. for name in dtype.names: result[name] = data[name] return result def new_like(self, cols, length, metadata_conflicts='warn', name=None): """ Return a new Column instance which is consistent with the input ``cols`` and has ``length`` rows. This is intended for creating an empty column object whose elements can be set in-place for table operations like join or vstack. Parameters ---------- cols : list List of input columns length : int Length of the output column object metadata_conflicts : str ('warn'|'error'|'silent') How to handle metadata conflicts name : str Output column name Returns ------- col : Column (or subclass) New instance of this class consistent with ``cols`` """ attrs = self.merge_cols_attributes(cols, metadata_conflicts, name, ('meta', 'unit', 'format', 'description')) return self._parent_cls(length=length, **attrs) def get_sortable_arrays(self): """ Return a list of arrays which can be lexically sorted to represent the order of the parent column. For Column this is just the column itself. Returns ------- arrays : list of ndarray """ return [self._parent] class BaseColumn(_ColumnGetitemShim, np.ndarray): meta = MetaData() def __new__(cls, data=None, name=None, dtype=None, shape=(), length=0, description=None, unit=None, format=None, meta=None, copy=False, copy_indices=True): if data is None: self_data = np.zeros((length,)+shape, dtype=dtype) elif isinstance(data, BaseColumn) and hasattr(data, '_name'): # When unpickling a MaskedColumn, ``data`` will be a bare # BaseColumn with none of the expected attributes. In this case # do NOT execute this block which initializes from ``data`` # attributes. self_data = np.array(data.data, dtype=dtype, copy=copy) if description is None: description = data.description if unit is None: unit = unit or data.unit if format is None: format = data.format if meta is None: meta = data.meta if name is None: name = data.name elif isinstance(data, Quantity): if unit is None: self_data = np.array(data, dtype=dtype, copy=copy) unit = data.unit else: self_data = Quantity(data, unit, dtype=dtype, copy=copy).value # If 'info' has been defined, copy basic properties (if needed). 
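            # (Checking ``__dict__`` directly avoids creating a default
            # ``info`` instance as a side effect, in the same way as
            # col_copy() above.)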
if 'info' in data.__dict__: if description is None: description = data.info.description if format is None: format = data.info.format if meta is None: meta = data.info.meta else: if np.dtype(dtype).char == 'S': data = cls._encode_str(data) self_data = np.array(data, dtype=dtype, copy=copy) self = self_data.view(cls) self._name = None if name is None else str(name) self._parent_table = None self.unit = unit self._format = format self.description = description self.meta = meta self.indices = deepcopy(getattr(data, 'indices', [])) if copy_indices else [] for index in self.indices: index.replace_col(data, self) return self @property def data(self): return self.view(np.ndarray) @property def value(self): """ An alias for the existing ``data`` attribute. """ return self.data @property def parent_table(self): # Note: It seems there are some cases where _parent_table is not set, # such after restoring from a pickled Column. Perhaps that should be # fixed, but this is also okay for now. if getattr(self, '_parent_table', None) is None: return None else: return self._parent_table() @parent_table.setter def parent_table(self, table): if table is None: self._parent_table = None else: self._parent_table = weakref.ref(table) info = ColumnInfo() def copy(self, order='C', data=None, copy_data=True): """ Return a copy of the current instance. If ``data`` is supplied then a view (reference) of ``data`` is used, and ``copy_data`` is ignored. Parameters ---------- order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout of the copy. 'C' means C-order, 'F' means F-order, 'A' means 'F' if ``a`` is Fortran contiguous, 'C' otherwise. 'K' means match the layout of ``a`` as closely as possible. (Note that this function and :func:numpy.copy are very similar, but have different default values for their order= arguments.) Default is 'C'. data : array, optional If supplied then use a view of ``data`` instead of the instance data. This allows copying the instance attributes and meta. copy_data : bool, optional Make a copy of the internal numpy array instead of using a reference. Default is True. Returns ------- col : Column or MaskedColumn Copy of the current column (same type as original) """ if data is None: data = self.data if copy_data: data = data.copy(order) out = data.view(self.__class__) out.__array_finalize__(self) # If there is meta on the original column then deepcopy (since "copy" of column # implies complete independence from original). __array_finalize__ will have already # made a light copy. I'm not sure how to avoid that initial light copy. if self.meta is not None: out.meta = self.meta # MetaData descriptor does a deepcopy here # for MaskedColumn, MaskedArray.__array_finalize__ also copies mask # from self, which is not the idea here, so undo if isinstance(self, MaskedColumn): out._mask = data._mask self._copy_groups(out) return out def __setstate__(self, state): """ Restore the internal state of the Column/MaskedColumn for pickling purposes. This requires that the last element of ``state`` is a 5-tuple that has Column-specific state values. """ # Get the Column attributes names = ('_name', '_unit', '_format', 'description', 'meta', 'indices') attrs = {name: val for name, val in zip(names, state[-1])} state = state[:-1] # Using super().__setstate__(state) gives # "TypeError 'int' object is not iterable", raised in # astropy.table._column_mixins._ColumnGetitemShim.__setstate_cython__() # Previously, it seems to have given an infinite recursion. 
# Hence, manually call the right super class to actually set up # the array object. super_class = ma.MaskedArray if isinstance(self, ma.MaskedArray) else np.ndarray super_class.__setstate__(self, state) # Set the Column attributes for name, val in attrs.items(): setattr(self, name, val) self._parent_table = None def __reduce__(self): """ Return a 3-tuple for pickling a Column. Use the super-class functionality but then add in a 5-tuple of Column-specific values that get used in __setstate__. """ super_class = ma.MaskedArray if isinstance(self, ma.MaskedArray) else np.ndarray reconstruct_func, reconstruct_func_args, state = super_class.__reduce__(self) # Define Column-specific attrs and meta that gets added to state. column_state = (self.name, self.unit, self.format, self.description, self.meta, self.indices) state = state + (column_state,) return reconstruct_func, reconstruct_func_args, state def __array_finalize__(self, obj): # Obj will be none for direct call to Column() creator if obj is None: return if callable(super().__array_finalize__): super().__array_finalize__(obj) # Self was created from template (e.g. obj[slice] or (obj * 2)) # or viewcast e.g. obj.view(Column). In either case we want to # init Column attributes for self from obj if possible. self.parent_table = None if not hasattr(self, 'indices'): # may have been copied in __new__ self.indices = [] self._copy_attrs(obj) if 'info' in getattr(obj, '__dict__', {}): self.info = obj.info def __array_wrap__(self, out_arr, context=None): """ __array_wrap__ is called at the end of every ufunc. Normally, we want a Column object back and do not have to do anything special. But there are two exceptions: 1) If the output shape is different (e.g. for reduction ufuncs like sum() or mean()), a Column still linking to a parent_table makes little sense, so we return the output viewed as the column content (ndarray or MaskedArray). For this case, we use "[()]" to select everything, and to ensure we convert a zero rank array to a scalar. (For some reason np.sum() returns a zero rank scalar array while np.mean() returns a scalar; So the [()] is needed for this case. 2) When the output is created by any function that returns a boolean we also want to consistently return an array rather than a column (see #1446 and #1685) """ out_arr = super().__array_wrap__(out_arr, context) if (self.shape != out_arr.shape or (isinstance(out_arr, BaseColumn) and (context is not None and context[0] in _comparison_functions))): return out_arr.data[()] else: return out_arr @property def name(self): """ The name of this column. """ return self._name @name.setter def name(self, val): if val is not None: val = str(val) if self.parent_table is not None: table = self.parent_table table.columns._rename_column(self.name, val) self._name = val @property def format(self): """ Format string for displaying values in this column. """ return self._format @format.setter def format(self, format_string): prev_format = getattr(self, '_format', None) self._format = format_string # set new format string try: # test whether it formats without error exemplarily self.pformat(max_lines=1) except Exception as err: # revert to restore previous format if there was one self._format = prev_format raise ValueError( "Invalid format for column '{}': could not display " "values in this column using this format".format( self.name)) from err @property def descr(self): """Array-interface compliant full description of the column. 
        This returns a 3-tuple (name, type, shape) that can always be
        used in a structured array dtype definition.
        """
        return (self.name, self.dtype.str, self.shape[1:])

    def iter_str_vals(self):
        """
        Return an iterator that yields the string-formatted values of this
        column.

        Returns
        -------
        str_vals : iterator
            Column values formatted as strings
        """
        # Iterate over formatted values with no max number of lines, no column
        # name, no unit, and ignoring the returned header info in outs.
        _pformat_col_iter = self._formatter._pformat_col_iter
        for str_val in _pformat_col_iter(self, -1, show_name=False, show_unit=False,
                                         show_dtype=False, outs={}):
            yield str_val

    def attrs_equal(self, col):
        """Compare the column attributes of ``col`` to this object.

        The comparison attributes are: ``name``, ``unit``, ``dtype``,
        ``format``, ``description``, and ``meta``.

        Parameters
        ----------
        col : Column
            Comparison column

        Returns
        -------
        equal : bool
            True if all attributes are equal
        """
        if not isinstance(col, BaseColumn):
            raise ValueError('Comparison `col` must be a Column or '
                             'MaskedColumn object')

        attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta')
        equal = all(getattr(self, x) == getattr(col, x) for x in attrs)

        return equal

    @property
    def _formatter(self):
        return FORMATTER if (self.parent_table is None) else self.parent_table.formatter

    def pformat(self, max_lines=None, show_name=True, show_unit=False, show_dtype=False,
                html=False):
        """Return a list of formatted string representations of column values.

        If no value of ``max_lines`` is supplied then the height of the
        screen terminal is used to set ``max_lines``.  If the terminal
        height cannot be determined then the default will be determined
        using the ``astropy.conf.max_lines`` configuration item.  If a
        negative value of ``max_lines`` is supplied then there is no line
        limit applied.

        Parameters
        ----------
        max_lines : int
            Maximum lines of output (header + data rows)
        show_name : bool
            Include column name. Default is True.
        show_unit : bool
            Include a header row for unit. Default is False.
        show_dtype : bool
            Include column dtype. Default is False.
        html : bool
            Format the output as an HTML table. Default is False.

        Returns
        -------
        lines : list
            List of lines with header and formatted column values
        """
        _pformat_col = self._formatter._pformat_col
        lines, outs = _pformat_col(self, max_lines, show_name=show_name,
                                   show_unit=show_unit, show_dtype=show_dtype,
                                   html=html)
        return lines

    def pprint(self, max_lines=None, show_name=True, show_unit=False, show_dtype=False):
        """Print a formatted string representation of column values.

        If no value of ``max_lines`` is supplied then the height of the
        screen terminal is used to set ``max_lines``.  If the terminal
        height cannot be determined then the default will be determined
        using the ``astropy.conf.max_lines`` configuration item.  If a
        negative value of ``max_lines`` is supplied then there is no line
        limit applied.

        Parameters
        ----------
        max_lines : int
            Maximum number of values in output
        show_name : bool
            Include column name. Default is True.
        show_unit : bool
            Include a header row for unit. Default is False.
        show_dtype : bool
            Include column dtype. Default is False.
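
        Examples
        --------
        A short sketch using ``pformat``, which returns the same lines that
        ``pprint`` prints (the layout shown assumes the default formatter
        behaves as in the table examples elsewhere in this package)::

            >>> col = Column([1, 2, 3], name='a')
            >>> col.pformat()
            [' a ', '---', '  1', '  2', '  3']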
""" _pformat_col = self._formatter._pformat_col lines, outs = _pformat_col(self, max_lines, show_name=show_name, show_unit=show_unit, show_dtype=show_dtype) n_header = outs['n_header'] for i, line in enumerate(lines): if i < n_header: color_print(line, 'red') else: print(line) def more(self, max_lines=None, show_name=True, show_unit=False): """Interactively browse column with a paging interface. Supported keys:: f, <space> : forward one page b : back one page r : refresh same page n : next row p : previous row < : go to beginning > : go to end q : quit browsing h : print this help Parameters ---------- max_lines : int Maximum number of lines in table output. show_name : bool Include a header row for column names. Default is True. show_unit : bool Include a header row for unit. Default is False. """ _more_tabcol = self._formatter._more_tabcol _more_tabcol(self, max_lines=max_lines, show_name=show_name, show_unit=show_unit) @property def unit(self): """ The unit associated with this column. May be a string or a `astropy.units.UnitBase` instance. Setting the ``unit`` property does not change the values of the data. To perform a unit conversion, use ``convert_unit_to``. """ return self._unit @unit.setter def unit(self, unit): if unit is None: self._unit = None else: self._unit = Unit(unit, parse_strict='silent') @unit.deleter def unit(self): self._unit = None def searchsorted(self, v, side='left', sorter=None): # For bytes type data, encode the `v` value as UTF-8 (if necessary) before # calling searchsorted. This prevents a factor of 1000 slowdown in # searchsorted in this case. a = self.data if a.dtype.kind == 'S' and not isinstance(v, bytes): v = np.asarray(v) if v.dtype.kind == 'U': v = np.char.encode(v, 'utf-8') return np.searchsorted(a, v, side=side, sorter=sorter) searchsorted.__doc__ = np.ndarray.searchsorted.__doc__ def convert_unit_to(self, new_unit, equivalencies=[]): """ Converts the values of the column in-place from the current unit to the given unit. To change the unit associated with this column without actually changing the data values, simply set the ``unit`` property. Parameters ---------- new_unit : str or `astropy.units.UnitBase` instance The unit to convert to. equivalencies : list of tuple A list of equivalence pairs to try if the unit are not directly convertible. See :ref:`astropy:unit_equivalencies`. Raises ------ astropy.units.UnitsError If units are inconsistent """ if self.unit is None: raise ValueError("No unit set on column") self.data[:] = self.unit.to( new_unit, self.data, equivalencies=equivalencies) self.unit = new_unit @property def groups(self): if not hasattr(self, '_groups'): self._groups = groups.ColumnGroups(self) return self._groups def group_by(self, keys): """ Group this column by the specified ``keys`` This effectively splits the column into groups which correspond to unique values of the ``keys`` grouping object. The output is a new `Column` or `MaskedColumn` which contains a copy of this column but sorted by row according to ``keys``. The ``keys`` input to ``group_by`` must be a numpy array with the same length as this column. 
Parameters ---------- keys : numpy array Key grouping object Returns ------- out : Column New column with groups attribute set accordingly """ return groups.column_group_by(self, keys) def _copy_groups(self, out): """ Copy current groups into a copy of self ``out`` """ if self.parent_table: if hasattr(self.parent_table, '_groups'): out._groups = groups.ColumnGroups(out, indices=self.parent_table._groups._indices) elif hasattr(self, '_groups'): out._groups = groups.ColumnGroups(out, indices=self._groups._indices) # Strip off the BaseColumn-ness for repr and str so that # MaskedColumn.data __repr__ does not include masked_BaseColumn(data = # [1 2], ...). def __repr__(self): return np.asarray(self).__repr__() @property def quantity(self): """ A view of this table column as a `~astropy.units.Quantity` object with units given by the Column's `unit` parameter. """ # the Quantity initializer is used here because it correctly fails # if the column's values are non-numeric (like strings), while .view # will happily return a quantity with gibberish for numerical values return Quantity(self, self.unit, copy=False, dtype=self.dtype, order='A', subok=True) def to(self, unit, equivalencies=[], **kwargs): """ Converts this table column to a `~astropy.units.Quantity` object with the requested units. Parameters ---------- unit : unit-like The unit to convert to (i.e., a valid argument to the :meth:`astropy.units.Quantity.to` method). equivalencies : list of tuple Equivalencies to use for this conversion. See :meth:`astropy.units.Quantity.to` for more details. Returns ------- quantity : `~astropy.units.Quantity` A quantity object with the contents of this column in the units ``unit``. """ return self.quantity.to(unit, equivalencies) def _copy_attrs(self, obj): """ Copy key column attributes from ``obj`` to self """ for attr in ('name', 'unit', '_format', 'description'): val = getattr(obj, attr, None) setattr(self, attr, val) # Light copy of meta if it is not empty obj_meta = getattr(obj, 'meta', None) if obj_meta: self.meta = obj_meta.copy() @staticmethod def _encode_str(value): """ Encode anything that is unicode-ish as utf-8. This method is only called for Py3+. """ if isinstance(value, str): value = value.encode('utf-8') elif isinstance(value, bytes) or value is np.ma.masked: pass else: arr = np.asarray(value) if arr.dtype.char == 'U': arr = np.char.encode(arr, encoding='utf-8') if isinstance(value, np.ma.MaskedArray): arr = np.ma.array(arr, mask=value.mask, copy=False) value = arr return value def tolist(self): if self.dtype.kind == 'S': return np.chararray.decode(self, encoding='utf-8').tolist() else: return super().tolist() class Column(BaseColumn): """Define a data column for use in a Table object. Parameters ---------- data : list, ndarray, or None Column data values name : str Column name and key for reference within Table dtype : `~numpy.dtype`-like Data type for column shape : tuple or () Dimensions of a single row element in the column data length : int or 0 Number of row elements in column data description : str or None Full description of column unit : str or None Physical unit format : str, None, or callable Format string for outputting column values. This can be an "old-style" (``format % value``) or "new-style" (`str.format`) format specification string or a function or any callable object that accepts a single value and returns a string. 
meta : dict-like or None Meta-data associated with the column Examples -------- A Column can be created in two different ways: - Provide a ``data`` value but not ``shape`` or ``length`` (which are inferred from the data). Examples:: col = Column(data=[1, 2], name='name') # shape=(2,) col = Column(data=[[1, 2], [3, 4]], name='name') # shape=(2, 2) col = Column(data=[1, 2], name='name', dtype=float) col = Column(data=np.array([1, 2]), name='name') col = Column(data=['hello', 'world'], name='name') The ``dtype`` argument can be any value which is an acceptable fixed-size data-type initializer for the numpy.dtype() method. See `<https://numpy.org/doc/stable/reference/arrays.dtypes.html>`_. Examples include: - Python non-string type (float, int, bool) - Numpy non-string type (e.g. np.float32, np.int64, np.bool\\_) - Numpy.dtype array-protocol type strings (e.g. 'i4', 'f8', 'S15') If no ``dtype`` value is provide then the type is inferred using ``np.array(data)``. - Provide ``length`` and optionally ``shape``, but not ``data`` Examples:: col = Column(name='name', length=5) col = Column(name='name', dtype=int, length=10, shape=(3,4)) The default ``dtype`` is ``np.float64``. The ``shape`` argument is the array shape of a single cell in the column. To access the ``Column`` data as a raw `numpy.ndarray` object, you can use one of the ``data`` or ``value`` attributes (which are equivalent):: col.data col.value """ def __new__(cls, data=None, name=None, dtype=None, shape=(), length=0, description=None, unit=None, format=None, meta=None, copy=False, copy_indices=True): if isinstance(data, MaskedColumn) and np.any(data.mask): raise TypeError("Cannot convert a MaskedColumn with masked value to a Column") self = super().__new__( cls, data=data, name=name, dtype=dtype, shape=shape, length=length, description=description, unit=unit, format=format, meta=meta, copy=copy, copy_indices=copy_indices) return self def __setattr__(self, item, value): if not isinstance(self, MaskedColumn) and item == "mask": raise AttributeError("cannot set mask value to a column in non-masked Table") super().__setattr__(item, value) if item == 'unit' and issubclass(self.dtype.type, np.number): try: converted = self.parent_table._convert_col_for_table(self) except AttributeError: # Either no parent table or parent table is None pass else: if converted is not self: self.parent_table.replace_column(self.name, converted) def _base_repr_(self, html=False): # If scalar then just convert to correct numpy type and use numpy repr if self.ndim == 0: return repr(self.item()) descr_vals = [self.__class__.__name__] unit = None if self.unit is None else str(self.unit) shape = None if self.ndim <= 1 else self.shape[1:] for attr, val in (('name', self.name), ('dtype', dtype_info_name(self.dtype)), ('shape', shape), ('unit', unit), ('format', self.format), ('description', self.description), ('length', len(self))): if val is not None: descr_vals.append(f'{attr}={val!r}') descr = '<' + ' '.join(descr_vals) + '>\n' if html: from astropy.utils.xml.writer import xml_escape descr = xml_escape(descr) data_lines, outs = self._formatter._pformat_col( self, show_name=False, show_unit=False, show_length=False, html=html) out = descr + '\n'.join(data_lines) return out def _repr_html_(self): return self._base_repr_(html=True) def __repr__(self): return self._base_repr_(html=False) def __str__(self): # If scalar then just convert to correct numpy type and use numpy repr if self.ndim == 0: return str(self.item()) lines, outs = self._formatter._pformat_col(self) 
return '\n'.join(lines) def __bytes__(self): return str(self).encode('utf-8') def _check_string_truncate(self, value): """ Emit a warning if any elements of ``value`` will be truncated when ``value`` is assigned to self. """ # Convert input ``value`` to the string dtype of this column and # find the length of the longest string in the array. value = np.asanyarray(value, dtype=self.dtype.type) if value.size == 0: return value_str_len = np.char.str_len(value).max() # Parse the array-protocol typestring (e.g. '|U15') of self.dtype which # has the character repeat count on the right side. self_str_len = dtype_bytes_or_chars(self.dtype) if value_str_len > self_str_len: warnings.warn('truncated right side string(s) longer than {} ' 'character(s) during assignment' .format(self_str_len), StringTruncateWarning, stacklevel=3) def __setitem__(self, index, value): if self.dtype.char == 'S': value = self._encode_str(value) # Issue warning for string assignment that truncates ``value`` if issubclass(self.dtype.type, np.character): self._check_string_truncate(value) # update indices self.info.adjust_indices(index, value, len(self)) # Set items using a view of the underlying data, as it gives an # order-of-magnitude speed-up. [#2994] self.data[index] = value __eq__ = _make_compare('__eq__') __ne__ = _make_compare('__ne__') __gt__ = _make_compare('__gt__') __lt__ = _make_compare('__lt__') __ge__ = _make_compare('__ge__') __le__ = _make_compare('__le__') def insert(self, obj, values, axis=0): """ Insert values before the given indices in the column and return a new `~astropy.table.Column` object. Parameters ---------- obj : int, slice or sequence of int Object that defines the index or indices before which ``values`` is inserted. values : array-like Value(s) to insert. If the type of ``values`` is different from that of the column, ``values`` is converted to the matching type. ``values`` should be shaped so that it can be broadcast appropriately. axis : int, optional Axis along which to insert ``values``. If ``axis`` is None then the column array is flattened before insertion. Default is 0, which will insert a row. Returns ------- out : `~astropy.table.Column` A copy of column with ``values`` and ``mask`` inserted. Note that the insertion does not occur in-place: a new column is returned. """ if self.dtype.kind == 'O': # Even if values is array-like (e.g. [1,2,3]), insert as a single # object. Numpy.insert instead inserts each element in an array-like # input individually. data = np.insert(self, obj, None, axis=axis) data[obj] = values else: self_for_insert = _expand_string_array_for_values(self, values) data = np.insert(self_for_insert, obj, values, axis=axis) out = data.view(self.__class__) out.__array_finalize__(self) return out # We do this to make the methods show up in the API docs name = BaseColumn.name unit = BaseColumn.unit copy = BaseColumn.copy more = BaseColumn.more pprint = BaseColumn.pprint pformat = BaseColumn.pformat convert_unit_to = BaseColumn.convert_unit_to quantity = BaseColumn.quantity to = BaseColumn.to class MaskedColumnInfo(ColumnInfo): """ Container for meta information like name, description, format. This is required when the object is used as a mixin column within a table, but can be used as a general way to store meta information. In this case it just adds the ``mask_val`` attribute. """ # Add `serialize_method` attribute to the attrs that MaskedColumnInfo knows # about. This allows customization of the way that MaskedColumn objects # get written to file depending on format. 
The default is to use whatever # the writer would normally do, which in the case of FITS or ECSV is to use # a NULL value within the data itself. If serialize_method is 'data_mask' # then the mask is explicitly written out as a separate column if there # are any masked values. See also code below. attr_names = ColumnInfo.attr_names | {'serialize_method'} # When `serialize_method` is 'data_mask', and data and mask are being written # as separate columns, use column names <name> and <name>.mask (instead # of default encoding as <name>.data and <name>.mask). _represent_as_dict_primary_data = 'data' mask_val = np.ma.masked def __init__(self, bound=False): super().__init__(bound) # If bound to a data object instance then create the dict of attributes # which stores the info attribute values. if bound: # Specify how to serialize this object depending on context. self.serialize_method = {'fits': 'null_value', 'ecsv': 'null_value', 'hdf5': 'data_mask', 'parquet': 'data_mask', None: 'null_value'} def _represent_as_dict(self): out = super()._represent_as_dict() # If we are a structured masked column, then our parent class, # ColumnInfo, will already have set up a dict with masked parts, # which will be serialized later, so no further work needed here. if self._parent.dtype.names is not None: return out col = self._parent # If the serialize method for this context (e.g. 'fits' or 'ecsv') is # 'data_mask', that means to serialize using an explicit mask column. method = self.serialize_method[self._serialize_context] if method == 'data_mask': # Note: a driver here is a performance issue in #8443 where repr() of a # np.ma.MaskedArray value is up to 10 times slower than repr of a normal array # value. So regardless of whether there are masked elements it is useful to # explicitly define this as a serialized column and use col.data.data (ndarray) # instead of letting it fall through to the "standard" serialization machinery. out['data'] = col.data.data if np.any(col.mask): # Only if there are actually masked elements do we add the ``mask`` column out['mask'] = col.mask elif method == 'null_value': pass else: raise ValueError('serialize method must be either "data_mask" or "null_value"') return out class MaskedColumn(Column, _MaskedColumnGetitemShim, ma.MaskedArray): """Define a masked data column for use in a Table object. Parameters ---------- data : list, ndarray, or None Column data values name : str Column name and key for reference within Table mask : list, ndarray or None Boolean mask for which True indicates missing or invalid data fill_value : float, int, str, or None Value used when filling masked column elements dtype : `~numpy.dtype`-like Data type for column shape : tuple or () Dimensions of a single row element in the column data length : int or 0 Number of row elements in column data description : str or None Full description of column unit : str or None Physical unit format : str, None, or callable Format string for outputting column values. This can be an "old-style" (``format % value``) or "new-style" (`str.format`) format specification string or a function or any callable object that accepts a single value and returns a string. meta : dict-like or None Meta-data associated with the column Examples -------- A MaskedColumn is similar to a Column except that it includes ``mask`` and ``fill_value`` attributes. It can be created in two different ways: - Provide a ``data`` value but not ``shape`` or ``length`` (which are inferred from the data). 
Examples:: col = MaskedColumn(data=[1, 2], name='name') col = MaskedColumn(data=[1, 2], name='name', mask=[True, False]) col = MaskedColumn(data=[1, 2], name='name', dtype=float, fill_value=99) The ``mask`` argument will be cast as a boolean array and specifies which elements are considered to be missing or invalid. The ``dtype`` argument can be any value which is an acceptable fixed-size data-type initializer for the numpy.dtype() method. See `<https://numpy.org/doc/stable/reference/arrays.dtypes.html>`_. Examples include: - Python non-string type (float, int, bool) - Numpy non-string type (e.g. np.float32, np.int64, np.bool\\_) - Numpy.dtype array-protocol type strings (e.g. 'i4', 'f8', 'S15') If no ``dtype`` value is provide then the type is inferred using ``np.array(data)``. When ``data`` is provided then the ``shape`` and ``length`` arguments are ignored. - Provide ``length`` and optionally ``shape``, but not ``data`` Examples:: col = MaskedColumn(name='name', length=5) col = MaskedColumn(name='name', dtype=int, length=10, shape=(3,4)) The default ``dtype`` is ``np.float64``. The ``shape`` argument is the array shape of a single cell in the column. To access the ``Column`` data as a raw `numpy.ma.MaskedArray` object, you can use one of the ``data`` or ``value`` attributes (which are equivalent):: col.data col.value """ info = MaskedColumnInfo() def __new__(cls, data=None, name=None, mask=None, fill_value=None, dtype=None, shape=(), length=0, description=None, unit=None, format=None, meta=None, copy=False, copy_indices=True): if mask is None: # If mask is None then we need to determine the mask (if any) from the data. # The naive method is looking for a mask attribute on data, but this can fail, # see #8816. Instead use ``MaskedArray`` to do the work. mask = ma.MaskedArray(data).mask if mask is np.ma.nomask: # Handle odd-ball issue with np.ma.nomask (numpy #13758), and see below. mask = False elif copy: mask = mask.copy() elif mask is np.ma.nomask: # Force the creation of a full mask array as nomask is tricky to # use and will fail in an unexpected manner when setting a value # to the mask. mask = False else: mask = deepcopy(mask) # Create self using MaskedArray as a wrapper class, following the example of # class MSubArray in # https://github.com/numpy/numpy/blob/maintenance/1.8.x/numpy/ma/tests/test_subclassing.py # This pattern makes it so that __array_finalize__ is called as expected (e.g. #1471 and # https://github.com/astropy/astropy/commit/ff6039e8) # First just pass through all args and kwargs to BaseColumn, then wrap that object # with MaskedArray. self_data = BaseColumn(data, dtype=dtype, shape=shape, length=length, name=name, unit=unit, format=format, description=description, meta=meta, copy=copy, copy_indices=copy_indices) self = ma.MaskedArray.__new__(cls, data=self_data, mask=mask) # The above process preserves info relevant for Column, but this does # not include serialize_method (and possibly other future attributes) # relevant for MaskedColumn, so we set info explicitly. if 'info' in getattr(data, '__dict__', {}): self.info = data.info # Note: do not set fill_value in the MaskedArray constructor because this does not # go through the fill_value workarounds. if fill_value is None and getattr(data, 'fill_value', None) is not None: # Coerce the fill_value to the correct type since `data` may be a # different dtype than self. 
fill_value = np.array(data.fill_value, self.dtype)[()] self.fill_value = fill_value self.parent_table = None # needs to be done here since self doesn't come from BaseColumn.__new__ for index in self.indices: index.replace_col(self_data, self) return self @property def fill_value(self): return self.get_fill_value() # defer to native ma.MaskedArray method @fill_value.setter def fill_value(self, val): """Set fill value both in the masked column view and in the parent table if it exists. Setting one or the other alone doesn't work.""" # another ma bug workaround: If the value of fill_value for a string array is # requested but not yet set then it gets created as 'N/A'. From this point onward # any new fill_values are truncated to 3 characters. Note that this does not # occur if the masked array is a structured array (as in the previous block that # deals with the parent table). # # >>> x = ma.array(['xxxx']) # >>> x.fill_value # fill_value now gets represented as an 'S3' array # 'N/A' # >>> x.fill_value='yyyy' # >>> x.fill_value # 'yyy' # # To handle this we are forced to reset a private variable first: self._fill_value = None self.set_fill_value(val) # defer to native ma.MaskedArray method @property def data(self): """The plain MaskedArray data held by this column.""" out = self.view(np.ma.MaskedArray) # By default, a MaskedArray view will set the _baseclass to be the # same as that of our own class, i.e., BaseColumn. Since we want # to return a plain MaskedArray, we reset the baseclass accordingly. out._baseclass = np.ndarray return out def filled(self, fill_value=None): """Return a copy of self, with masked values filled with a given value. Parameters ---------- fill_value : scalar; optional The value to use for invalid entries (`None` by default). If `None`, the ``fill_value`` attribute of the array is used instead. Returns ------- filled_column : Column A copy of ``self`` with masked entries replaced by `fill_value` (be it the function argument or the attribute of ``self``). """ if fill_value is None: fill_value = self.fill_value data = super().filled(fill_value) # Use parent table definition of Column if available column_cls = self.parent_table.Column if (self.parent_table is not None) else Column out = column_cls(name=self.name, data=data, unit=self.unit, format=self.format, description=self.description, meta=deepcopy(self.meta)) return out def insert(self, obj, values, mask=None, axis=0): """ Insert values along the given axis before the given indices and return a new `~astropy.table.MaskedColumn` object. Parameters ---------- obj : int, slice or sequence of int Object that defines the index or indices before which ``values`` is inserted. values : array-like Value(s) to insert. If the type of ``values`` is different from that of the column, ``values`` is converted to the matching type. ``values`` should be shaped so that it can be broadcast appropriately. mask : bool or array-like Mask value(s) to insert. If not supplied, and values does not have a mask either, then False is used. axis : int, optional Axis along which to insert ``values``. If ``axis`` is None then the column array is flattened before insertion. Default is 0, which will insert a row. Returns ------- out : `~astropy.table.MaskedColumn` A copy of column with ``values`` and ``mask`` inserted. Note that the insertion does not occur in-place: a new masked column is returned. """ self_ma = self.data # self viewed as MaskedArray if self.dtype.kind == 'O': # Even if values is array-like (e.g. 
[1,2,3]), insert as a single # object. Numpy.insert instead inserts each element in an array-like # input individually. new_data = np.insert(self_ma.data, obj, None, axis=axis) new_data[obj] = values else: self_ma = _expand_string_array_for_values(self_ma, values) new_data = np.insert(self_ma.data, obj, values, axis=axis) if mask is None: mask = getattr(values, 'mask', np.ma.nomask) if mask is np.ma.nomask: if self.dtype.kind == 'O': mask = False else: mask = np.zeros(np.shape(values), dtype=bool) new_mask = np.insert(self_ma.mask, obj, mask, axis=axis) new_ma = np.ma.array(new_data, mask=new_mask, copy=False) out = new_ma.view(self.__class__) out.parent_table = None out.indices = [] out._copy_attrs(self) out.fill_value = self.fill_value return out def _copy_attrs_slice(self, out): # Fixes issue #3023: when calling getitem with a MaskedArray subclass # the original object attributes are not copied. if out.__class__ is self.__class__: # TODO: this part is essentially the same as what is done in # __array_finalize__ and could probably be called directly in our # override of __getitem__ in _columns_mixins.pyx). Refactor? if 'info' in self.__dict__: out.info = self.info out.parent_table = None # we need this because __getitem__ does a shallow copy of indices if out.indices is self.indices: out.indices = [] out._copy_attrs(self) return out def __setitem__(self, index, value): # Issue warning for string assignment that truncates ``value`` if self.dtype.char == 'S': value = self._encode_str(value) if issubclass(self.dtype.type, np.character): # Account for a bug in np.ma.MaskedArray setitem. # https://github.com/numpy/numpy/issues/8624 value = np.ma.asanyarray(value, dtype=self.dtype.type) # Check for string truncation after filling masked items with # empty (zero-length) string. Note that filled() does not make # a copy if there are no masked items. self._check_string_truncate(value.filled('')) # update indices self.info.adjust_indices(index, value, len(self)) ma.MaskedArray.__setitem__(self, index, value) # We do this to make the methods show up in the API docs name = BaseColumn.name copy = BaseColumn.copy more = BaseColumn.more pprint = BaseColumn.pprint pformat = BaseColumn.pformat convert_unit_to = BaseColumn.convert_unit_to
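# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above): a minimal,
# hedged example of the Column / MaskedColumn behavior implemented in this
# file -- format validation, unit conversion via `quantity`/`to`, `insert`,
# and mask filling. Column names and values here are arbitrary; it assumes
# only that astropy and numpy are installed.
if __name__ == '__main__':
    from astropy.table import Column, MaskedColumn

    # The format setter above validates the format string by test-formatting
    # one value; an invalid format raises ValueError immediately.
    c = Column(data=[1.0, 2.0, 3.0], name='flux', unit='Jy', format='{:.2f}')
    print('\n'.join(c.pformat(show_unit=True)))

    # `quantity` is a no-copy Quantity view; `to` converts units.
    print(c.quantity.sum())
    print(c.to('mJy'))

    # insert() returns a new Column; the original is unchanged.
    c2 = c.insert(1, 1.5)
    print(len(c), len(c2))

    # MaskedColumn: filled() replaces masked entries with fill_value.
    m = MaskedColumn(data=[1, 2, 3], name='a', mask=[False, True, False],
                     fill_value=-99)
    print(m.filled())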
e779e71fe5beef1a0a85afe182775be7a7afd958dd55af8b91ebbf90659165c6
# Licensed under a 3-clause BSD style license - see LICENSE.rst import os import sys import re import fnmatch import numpy as np from astropy import log from astropy.utils.console import Getch, color_print, terminal_size, conf from astropy.utils.data_info import dtype_info_name __all__ = [] def default_format_func(format_, val): if isinstance(val, bytes): return val.decode('utf-8', errors='replace') else: return str(val) # The first three functions are helpers for _auto_format_func def _use_str_for_masked_values(format_func): """Wrap format function to trap masked values. String format functions and most user functions will not be able to deal with masked values, so we wrap them to ensure they are passed to str(). """ return lambda format_, val: (str(val) if val is np.ma.masked else format_func(format_, val)) def _possible_string_format_functions(format_): """Iterate through possible string-derived format functions. A string can either be a format specifier for the format built-in, a new-style format string, or an old-style format string. """ yield lambda format_, val: format(val, format_) yield lambda format_, val: format_.format(val) yield lambda format_, val: format_ % val yield lambda format_, val: format_.format(**{k: val[k] for k in val.dtype.names}) def get_auto_format_func( col=None, possible_string_format_functions=_possible_string_format_functions): """ Return a wrapped ``auto_format_func`` function which is used in formatting table columns. This is primarily an internal function but gets used directly in other parts of astropy, e.g. `astropy.io.ascii`. Parameters ---------- col : object, optional The column being formatted; its ``info._format_funcs`` dict is used to cache the chosen format function. Default is None. possible_string_format_functions : func, optional Function that yields possible string formatting functions (defaults to internal function to do this). Returns ------- Wrapped ``auto_format_func`` function """ def _auto_format_func(format_, val): """Format ``val`` according to ``format_`` for a plain format specifier, old- or new-style format strings, or using a user supplied function. More importantly, determine and cache (in _format_funcs) a function that will do this subsequently. In this way this complicated logic is only done for the first value. Returns the formatted value. """ if format_ is None: return default_format_func(format_, val) if format_ in col.info._format_funcs: return col.info._format_funcs[format_](format_, val) if callable(format_): format_func = lambda format_, val: format_(val) # noqa try: out = format_func(format_, val) if not isinstance(out, str): raise ValueError('Format function for value {} returned {} ' 'instead of string type' .format(val, type(out))) except Exception as err: # For a masked element, the format function call likely failed # to handle it. Just return the string representation for now, # and retry when a non-masked value comes along. if val is np.ma.masked: return str(val) raise ValueError(f'Format function for value {val} failed.') from err # If the user-supplied function handles formatting masked elements, use # it directly. Otherwise, wrap it in a function that traps them. try: format_func(format_, np.ma.masked) except Exception: format_func = _use_str_for_masked_values(format_func) else: # For a masked element, we cannot set string-based format functions yet, # as all tests below will fail. Just return the string representation # of masked for now, and retry when a non-masked value comes along.
if val is np.ma.masked: return str(val) for format_func in possible_string_format_functions(format_): try: # Does this string format method work? out = format_func(format_, val) # Require that the format statement actually did something. if out == format_: raise ValueError('the format passed in did nothing.') except Exception: continue else: break else: # None of the possible string functions passed muster. raise ValueError('unable to parse format string {} for its ' 'column.'.format(format_)) # String-based format functions will fail on masked elements; # wrap them in a function that traps them. format_func = _use_str_for_masked_values(format_func) col.info._format_funcs[format_] = format_func return out return _auto_format_func def _get_pprint_include_names(table): """Get the set of names to show in pprint from the table pprint_include_names and pprint_exclude_names attributes. These may be fnmatch unix-style globs. """ def get_matches(name_globs, default): match_names = set() if name_globs: # For None or () use the default for name in table.colnames: for name_glob in name_globs: if fnmatch.fnmatch(name, name_glob): match_names.add(name) break else: match_names.update(default) return match_names include_names = get_matches(table.pprint_include_names(), table.colnames) exclude_names = get_matches(table.pprint_exclude_names(), []) return include_names - exclude_names class TableFormatter: @staticmethod def _get_pprint_size(max_lines=None, max_width=None): """Get the output size (number of lines and character width) for Column and Table pformat/pprint methods. If no value of ``max_lines`` is supplied then the height of the screen terminal is used to set ``max_lines``. If the terminal height cannot be determined then the default will be determined using the ``astropy.table.conf.max_lines`` configuration item. If a negative value of ``max_lines`` is supplied then there is no line limit applied. The same applies for max_width except the configuration item is ``astropy.table.conf.max_width``. Parameters ---------- max_lines : int or None Maximum lines of output (header + data rows) max_width : int or None Maximum width (characters) output Returns ------- max_lines, max_width : int """ # Declare to keep static type checker happy. lines = None width = None if max_lines is None: max_lines = conf.max_lines if max_width is None: max_width = conf.max_width if max_lines is None or max_width is None: lines, width = terminal_size() if max_lines is None: max_lines = lines elif max_lines < 0: max_lines = sys.maxsize if max_lines < 8: max_lines = 8 if max_width is None: max_width = width elif max_width < 0: max_width = sys.maxsize if max_width < 10: max_width = 10 return max_lines, max_width def _pformat_col(self, col, max_lines=None, show_name=True, show_unit=None, show_dtype=False, show_length=None, html=False, align=None): """Return a list of formatted string representation of column values. Parameters ---------- max_lines : int Maximum lines of output (header + data rows) show_name : bool Include column name. Default is True. show_unit : bool Include a header row for unit. Default is to show a row for units only if one or more columns has a defined value for the unit. show_dtype : bool Include column dtype. Default is False. show_length : bool Include column length at end. Default is to show this only if the column is not shown completely. html : bool Output column as HTML align : str Left/right alignment of columns. Default is '>' (right) for all columns. 
Other allowed values are '<', '^', and '0=' for left, centered, and 0-padded, respectively. Returns ------- lines : list List of lines with formatted column values outs : dict Dict which is used to pass back additional values defined within the iterator. """ if show_unit is None: show_unit = col.info.unit is not None outs = {} # Some values from _pformat_col_iter iterator that are needed here col_strs_iter = self._pformat_col_iter(col, max_lines, show_name=show_name, show_unit=show_unit, show_dtype=show_dtype, show_length=show_length, outs=outs) # Replace tab and newline with text representations so they display nicely. # Newline in particular is a problem in a multicolumn table. col_strs = [val.replace('\t', '\\t').replace('\n', '\\n') for val in col_strs_iter] if len(col_strs) > 0: col_width = max(len(x) for x in col_strs) if html: from astropy.utils.xml.writer import xml_escape n_header = outs['n_header'] for i, col_str in enumerate(col_strs): # _pformat_col output has a header line '----' which is not needed here if i == n_header - 1: continue td = 'th' if i < n_header else 'td' val = f'<{td}>{xml_escape(col_str.strip())}</{td}>' row = ('<tr>' + val + '</tr>') if i < n_header: row = ('<thead>' + row + '</thead>') col_strs[i] = row if n_header > 0: # Get rid of '---' header line col_strs.pop(n_header - 1) col_strs.insert(0, '<table>') col_strs.append('</table>') # Now bring all the column string values to the same fixed width else: col_width = max(len(x) for x in col_strs) if col_strs else 1 # Center line header content and generate dashed headerline for i in outs['i_centers']: col_strs[i] = col_strs[i].center(col_width) if outs['i_dashes'] is not None: col_strs[outs['i_dashes']] = '-' * col_width # Format columns according to alignment. `align` arg has precedent, otherwise # use `col.format` if it starts as a legal alignment string. If neither applies # then right justify. re_fill_align = re.compile(r'(?P<fill>.?)(?P<align>[<^>=])') match = None if align: # If there is an align specified then it must match match = re_fill_align.match(align) if not match: raise ValueError("column align must be one of '<', '^', '>', or '='") elif isinstance(col.info.format, str): # col.info.format need not match, in which case rjust gets used match = re_fill_align.match(col.info.format) if match: fill_char = match.group('fill') align_char = match.group('align') if align_char == '=': if fill_char != '0': raise ValueError("fill character must be '0' for '=' align") fill_char = '' # str.zfill gets used which does not take fill char arg else: fill_char = '' align_char = '>' justify_methods = {'<': 'ljust', '^': 'center', '>': 'rjust', '=': 'zfill'} justify_method = justify_methods[align_char] justify_args = (col_width, fill_char) if fill_char else (col_width,) for i, col_str in enumerate(col_strs): col_strs[i] = getattr(col_str, justify_method)(*justify_args) if outs['show_length']: col_strs.append(f'Length = {len(col)} rows') return col_strs, outs def _name_and_structure(self, name, dtype, sep=" "): """Format a column name, including a possible structure. Normally, just returns the name, but if it has a structured dtype, will add the parts in between square brackets. E.g., "name [f0, f1]" or "name [f0[sf0, sf1], f1]". 
""" if dtype is None or dtype.names is None: return name structure = ', '.join([self._name_and_structure(name, dt, sep="") for name, (dt, _) in dtype.fields.items()]) return f"{name}{sep}[{structure}]" def _pformat_col_iter(self, col, max_lines, show_name, show_unit, outs, show_dtype=False, show_length=None): """Iterator which yields formatted string representation of column values. Parameters ---------- max_lines : int Maximum lines of output (header + data rows) show_name : bool Include column name. Default is True. show_unit : bool Include a header row for unit. Default is to show a row for units only if one or more columns has a defined value for the unit. outs : dict Must be a dict which is used to pass back additional values defined within the iterator. show_dtype : bool Include column dtype. Default is False. show_length : bool Include column length at end. Default is to show this only if the column is not shown completely. """ max_lines, _ = self._get_pprint_size(max_lines, -1) dtype = getattr(col, 'dtype', None) multidims = getattr(col, 'shape', [0])[1:] if multidims: multidim0 = tuple(0 for n in multidims) multidim1 = tuple(n - 1 for n in multidims) trivial_multidims = np.prod(multidims) == 1 i_dashes = None i_centers = [] # Line indexes where content should be centered n_header = 0 if show_name: i_centers.append(n_header) # Get column name (or 'None' if not set) col_name = str(col.info.name) n_header += 1 yield self._name_and_structure(col_name, dtype) if show_unit: i_centers.append(n_header) n_header += 1 yield str(col.info.unit or '') if show_dtype: i_centers.append(n_header) n_header += 1 if dtype is not None: col_dtype = dtype_info_name((dtype, multidims)) else: col_dtype = col.__class__.__qualname__ or 'object' yield col_dtype if show_unit or show_name or show_dtype: i_dashes = n_header n_header += 1 yield '---' max_lines -= n_header n_print2 = max_lines // 2 n_rows = len(col) # This block of code is responsible for producing the function that # will format values for this column. The ``format_func`` function # takes two args (col_format, val) and returns the string-formatted # version. Some points to understand: # # - col_format could itself be the formatting function, so it will # actually end up being called with itself as the first arg. In # this case the function is expected to ignore its first arg. # # - auto_format_func is a function that gets called on the first # column value that is being formatted. It then determines an # appropriate formatting function given the actual value to be # formatted. This might be deterministic or it might involve # try/except. The latter allows for different string formatting # options like %f or {:5.3f}. When auto_format_func is called it: # 1. Caches the function in the _format_funcs dict so for subsequent # values the right function is called right away. # 2. Returns the formatted value. # # - possible_string_format_functions is a function that yields a # succession of functions that might successfully format the # value. There is a default, but Mixin methods can override this. # See Quantity for an example. # # - get_auto_format_func() returns a wrapped version of auto_format_func # with the column id and possible_string_format_functions as # enclosed variables. 
col_format = col.info.format or getattr(col.info, 'default_format', None) pssf = (getattr(col.info, 'possible_string_format_functions', None) or _possible_string_format_functions) auto_format_func = get_auto_format_func(col, pssf) format_func = col.info._format_funcs.get(col_format, auto_format_func) if len(col) > max_lines: if show_length is None: show_length = True i0 = n_print2 - (1 if show_length else 0) i1 = n_rows - n_print2 - max_lines % 2 indices = np.concatenate([np.arange(0, i0 + 1), np.arange(i1 + 1, len(col))]) else: i0 = -1 indices = np.arange(len(col)) def format_col_str(idx): if multidims: # Prevents columns like Column(data=[[(1,)],[(2,)]], name='a') # with shape (n,1,...,1) from being printed as if there was # more than one element in a row if trivial_multidims: return format_func(col_format, col[(idx,) + multidim0]) else: left = format_func(col_format, col[(idx,) + multidim0]) right = format_func(col_format, col[(idx,) + multidim1]) return f'{left} .. {right}' else: return format_func(col_format, col[idx]) # Add formatted values if within bounds allowed by max_lines for idx in indices: if idx == i0: yield '...' else: try: yield format_col_str(idx) except ValueError: raise ValueError( 'Unable to parse format string "{}" for entry "{}" ' 'in column "{}"'.format(col_format, col[idx], col.info.name)) outs['show_length'] = show_length outs['n_header'] = n_header outs['i_centers'] = i_centers outs['i_dashes'] = i_dashes def _pformat_table(self, table, max_lines=None, max_width=None, show_name=True, show_unit=None, show_dtype=False, html=False, tableid=None, tableclass=None, align=None): """Return a list of lines for the formatted string representation of the table. Parameters ---------- max_lines : int or None Maximum number of rows to output max_width : int or None Maximum character width of output show_name : bool Include a header row for column names. Default is True. show_unit : bool Include a header row for unit. Default is to show a row for units only if one or more columns has a defined value for the unit. show_dtype : bool Include a header row for column dtypes. Default is to False. html : bool Format the output as an HTML table. Default is False. tableid : str or None An ID tag for the table; only used if html is set. Default is "table{id}", where id is the unique integer id of the table object, id(table) tableclass : str or list of str or None CSS classes for the table; only used if html is set. Default is none align : str or list or tuple Left/right alignment of columns. Default is '>' (right) for all columns. Other allowed values are '<', '^', and '0=' for left, centered, and 0-padded, respectively. A list of strings can be provided for alignment of tables with multiple columns. Returns ------- rows : list Formatted table as a list of strings outs : dict Dict which is used to pass back additional values defined within the iterator. 
""" # "Print" all the values into temporary lists by column for subsequent # use and to determine the width max_lines, max_width = self._get_pprint_size(max_lines, max_width) if show_unit is None: show_unit = any(col.info.unit for col in table.columns.values()) # Coerce align into a correctly-sized list of alignments (if possible) n_cols = len(table.columns) if align is None or isinstance(align, str): align = [align] * n_cols elif isinstance(align, (list, tuple)): if len(align) != n_cols: raise ValueError('got {} alignment values instead of ' 'the number of columns ({})' .format(len(align), n_cols)) else: raise TypeError('align keyword must be str or list or tuple (got {})' .format(type(align))) # Process column visibility from table pprint_include_names and # pprint_exclude_names attributes and get the set of columns to show. pprint_include_names = _get_pprint_include_names(table) cols = [] outs = None # Initialize so static type checker is happy for align_, col in zip(align, table.columns.values()): if col.info.name not in pprint_include_names: continue lines, outs = self._pformat_col(col, max_lines, show_name=show_name, show_unit=show_unit, show_dtype=show_dtype, align=align_) if outs['show_length']: lines = lines[:-1] cols.append(lines) if not cols: return ['<No columns>'], {'show_length': False} # Use the values for the last column since they are all the same n_header = outs['n_header'] n_rows = len(cols[0]) def outwidth(cols): return sum(len(c[0]) for c in cols) + len(cols) - 1 dots_col = ['...'] * n_rows middle = len(cols) // 2 while outwidth(cols) > max_width: if len(cols) == 1: break if len(cols) == 2: cols[1] = dots_col break if cols[middle] is dots_col: cols.pop(middle) middle = len(cols) // 2 cols[middle] = dots_col # Now "print" the (already-stringified) column values into a # row-oriented list. rows = [] if html: from astropy.utils.xml.writer import xml_escape if tableid is None: tableid = f'table{id(table)}' if tableclass is not None: if isinstance(tableclass, list): tableclass = ' '.join(tableclass) rows.append(f'<table id="{tableid}" class="{tableclass}">') else: rows.append(f'<table id="{tableid}">') for i in range(n_rows): # _pformat_col output has a header line '----' which is not needed here if i == n_header - 1: continue td = 'th' if i < n_header else 'td' vals = (f'<{td}>{xml_escape(col[i].strip())}</{td}>' for col in cols) row = ('<tr>' + ''.join(vals) + '</tr>') if i < n_header: row = ('<thead>' + row + '</thead>') rows.append(row) rows.append('</table>') else: for i in range(n_rows): row = ' '.join(col[i] for col in cols) rows.append(row) return rows, outs def _more_tabcol(self, tabcol, max_lines=None, max_width=None, show_name=True, show_unit=None, show_dtype=False): """Interactive "more" of a table or column. Parameters ---------- max_lines : int or None Maximum number of rows to output max_width : int or None Maximum character width of output show_name : bool Include a header row for column names. Default is True. show_unit : bool Include a header row for unit. Default is to show a row for units only if one or more columns has a defined value for the unit. show_dtype : bool Include a header row for column dtypes. Default is False. """ allowed_keys = 'f br<>qhpn' # Count the header lines n_header = 0 if show_name: n_header += 1 if show_unit: n_header += 1 if show_dtype: n_header += 1 if show_name or show_unit or show_dtype: n_header += 1 # Set up kwargs for pformat call. Only Table gets max_width. 
kwargs = dict(max_lines=-1, show_name=show_name, show_unit=show_unit, show_dtype=show_dtype) if hasattr(tabcol, 'columns'): # tabcol is a table kwargs['max_width'] = max_width # If max_lines is None (=> query screen size) then increase by 2. # This is because get_pprint_size leaves 6 extra lines so that in # ipython you normally see the last input line. max_lines1, max_width = self._get_pprint_size(max_lines, max_width) if max_lines is None: max_lines1 += 2 delta_lines = max_lines1 - n_header # Set up a function to get a single character on any platform inkey = Getch() i0 = 0 # First table/column row to show showlines = True while True: i1 = i0 + delta_lines # Last table/col row to show if showlines: # Don't always show the table (e.g. after help) try: os.system('cls' if os.name == 'nt' else 'clear') except Exception: pass # No worries if clear screen call fails lines = tabcol[i0:i1].pformat(**kwargs) colors = ('red' if i < n_header else 'default' for i in range(len(lines))) for color, line in zip(colors, lines): color_print(line, color) showlines = True print() print("-- f, <space>, b, r, p, n, <, >, q h (help) --", end=' ') # Get a valid key while True: try: key = inkey().lower() except Exception: print("\n") log.error('Console does not support getting a character' ' as required by more(). Use pprint() instead.') return if key in allowed_keys: break print(key) if key.lower() == 'q': break elif key == ' ' or key == 'f': i0 += delta_lines elif key == 'b': i0 = i0 - delta_lines elif key == 'r': pass elif key == '<': i0 = 0 elif key == '>': i0 = len(tabcol) elif key == 'p': i0 -= 1 elif key == 'n': i0 += 1 elif key == 'h': showlines = False print(""" Browsing keys: f, <space> : forward one page b : back one page r : refresh same page n : next row p : previous row < : go to beginning > : go to end q : quit browsing h : print this help""", end=' ') if i0 < 0: i0 = 0 if i0 >= len(tabcol) - delta_lines: i0 = len(tabcol) - delta_lines print("\n")
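# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module above): exercises the
# TableFormatter machinery through the public Table/Column API. The table
# contents are arbitrary; it assumes only that astropy and numpy are
# installed.
if __name__ == '__main__':
    import numpy as np
    from astropy.table import Table

    t = Table({'name': ['a', 'b', 'c'], 'flux': [1.0, 20.5, 300.25]})
    # A plain format-spec string; _possible_string_format_functions above
    # tries format(val, '.2f') first and caches the winner in _format_funcs.
    t['flux'].info.format = '.2f'

    # Alignment is handled per column by _pformat_col ('<', '^', '>', '0=').
    print('\n'.join(t.pformat(align=['<', '>'])))

    # Rows are elided around a '...' marker once max_lines is exceeded,
    # as implemented in _pformat_col_iter.
    Table({'x': np.arange(100)}).pprint(max_lines=10)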
744774ebec167e70974691347ab71214d1c79fee42ff233598b3b73d7893ae16
""" High-level table operations: - join() - setdiff() - hstack() - vstack() - dstack() """ # Licensed under a 3-clause BSD style license - see LICENSE.rst from copy import deepcopy import collections import itertools from collections import OrderedDict, Counter from collections.abc import Mapping, Sequence import numpy as np from astropy.utils import metadata from astropy.utils.masked import Masked from .table import Table, QTable, Row, Column, MaskedColumn from astropy.units import Quantity from . import _np_utils from .np_utils import TableMergeError __all__ = ['join', 'setdiff', 'hstack', 'vstack', 'unique', 'join_skycoord', 'join_distance'] __doctest_requires__ = {'join_skycoord': ['scipy'], 'join_distance': ['scipy']} def _merge_table_meta(out, tables, metadata_conflicts='warn'): out_meta = deepcopy(tables[0].meta) for table in tables[1:]: out_meta = metadata.merge(out_meta, table.meta, metadata_conflicts=metadata_conflicts) out.meta.update(out_meta) def _get_list_of_tables(tables): """ Check that tables is a Table or sequence of Tables. Returns the corresponding list of Tables. """ # Make sure we have a list of things if not isinstance(tables, Sequence): tables = [tables] # Make sure there is something to stack if len(tables) == 0: raise ValueError('no values provided to stack.') # Convert inputs (Table, Row, or anything column-like) to Tables. # Special case that Quantity converts to a QTable. for ii, val in enumerate(tables): if isinstance(val, Table): pass elif isinstance(val, Row): tables[ii] = Table(val) elif isinstance(val, Quantity): tables[ii] = QTable([val]) else: try: tables[ii] = Table([val]) except (ValueError, TypeError) as err: raise TypeError(f'Cannot convert {val} to table column.') from err return tables def _get_out_class(objs): """ From a list of input objects ``objs`` get merged output object class. This is just taken as the deepest subclass. This doesn't handle complicated inheritance schemes, but as a special case, classes which share ``info`` are taken to be compatible. """ out_class = objs[0].__class__ for obj in objs[1:]: if issubclass(obj.__class__, out_class): out_class = obj.__class__ if any(not (issubclass(out_class, obj.__class__) or out_class.info is obj.__class__.info) for obj in objs): raise ValueError('unmergeable object classes {}' .format([obj.__class__.__name__ for obj in objs])) return out_class def join_skycoord(distance, distance_func='search_around_sky'): """Helper function to join on SkyCoord columns using distance matching. This function is intended for use in ``table.join()`` to allow performing a table join where the key columns are both ``SkyCoord`` objects, matched by computing the distance between points and accepting values below ``distance``. The distance cross-matching is done using either `~astropy.coordinates.search_around_sky` or `~astropy.coordinates.search_around_3d`, depending on the value of ``distance_func``. The default is ``'search_around_sky'``. One can also provide a function object for ``distance_func``, in which case it must be a function that follows the same input and output API as `~astropy.coordinates.search_around_sky`. In this case the function will be called with ``(skycoord1, skycoord2, distance)`` as arguments. Parameters ---------- distance : `~astropy.units.Quantity` ['angle', 'length'] Maximum distance between points to be considered a join match. Must have angular or distance units. distance_func : str or function Specifies the function for performing the cross-match based on ``distance``. 
If supplied as a string this specifies the name of a function in `astropy.coordinates`. If supplied as a function then that function is called directly. Returns ------- join_func : function Function that accepts two ``SkyCoord`` columns (col1, col2) and returns the tuple (ids1, ids2) of pair-matched unique identifiers. Examples -------- This example shows an inner join of two ``SkyCoord`` columns, taking any sources within 0.2 deg to be a match. Note the new ``sc_id`` column which is added and provides a unique source identifier for the matches. >>> from astropy.coordinates import SkyCoord >>> import astropy.units as u >>> from astropy.table import Table, join_skycoord >>> from astropy import table >>> sc1 = SkyCoord([0, 1, 1.1, 2], [0, 0, 0, 0], unit='deg') >>> sc2 = SkyCoord([0.5, 1.05, 2.1], [0, 0, 0], unit='deg') >>> join_func = join_skycoord(0.2 * u.deg) >>> join_func(sc1, sc2) # Associate each coordinate with unique source ID (array([3, 1, 1, 2]), array([4, 1, 2])) >>> t1 = Table([sc1], names=['sc']) >>> t2 = Table([sc2], names=['sc']) >>> t12 = table.join(t1, t2, join_funcs={'sc': join_skycoord(0.2 * u.deg)}) >>> print(t12) # Note new `sc_id` column with the IDs from join_func() sc_id sc_1 sc_2 deg,deg deg,deg ----- ------- -------- 1 1.0,0.0 1.05,0.0 1 1.1,0.0 1.05,0.0 2 2.0,0.0 2.1,0.0 """ if isinstance(distance_func, str): import astropy.coordinates as coords try: distance_func = getattr(coords, distance_func) except AttributeError as err: raise ValueError('distance_func must be a function in astropy.coordinates') from err else: from inspect import isfunction if not isfunction(distance_func): raise ValueError('distance_func must be a str or function') def join_func(sc1, sc2): # Call the appropriate SkyCoord method to find pairs within distance idxs1, idxs2, d2d, d3d = distance_func(sc1, sc2, distance) # Now convert that into unique identifiers for each near-pair. This is # taken to be transitive, so that if points 1 and 2 are "near" and points # 1 and 3 are "near", then 1, 2, and 3 are all given the same identifier. # This identifier will then be used in the table join matching. # Identifiers for each column, initialized to all zero. ids1 = np.zeros(len(sc1), dtype=int) ids2 = np.zeros(len(sc2), dtype=int) # Start the identifier count at 1 id_ = 1 for idx1, idx2 in zip(idxs1, idxs2): # If this col1 point is previously identified then set corresponding # col2 point to same identifier. Likewise for col2 and col1. if ids1[idx1] > 0: ids2[idx2] = ids1[idx1] elif ids2[idx2] > 0: ids1[idx1] = ids2[idx2] else: # Not yet seen so set identifier for col1 and col2 ids1[idx1] = id_ ids2[idx2] = id_ id_ += 1 # Fill in unique identifiers for points with no near neighbor for ids in (ids1, ids2): for idx in np.flatnonzero(ids == 0): ids[idx] = id_ id_ += 1 # End of enclosure join_func() return ids1, ids2 return join_func def join_distance(distance, kdtree_args=None, query_args=None): """Helper function to join table columns using distance matching. This function is intended for use in ``table.join()`` to allow performing a table join where the key columns are matched by computing the distance between points and accepting values below ``distance``. This numerical "fuzzy" match can apply to 1-D or 2-D columns, where in the latter case the distance is a vector distance. The distance cross-matching is done using `scipy.spatial.cKDTree`. If necessary you can tweak the default behavior by providing ``dict`` values for the ``kdtree_args`` or ``query_args``. 
Parameters ---------- distance : float or `~astropy.units.Quantity` ['length'] Maximum distance between points to be considered a join match kdtree_args : dict, None Optional extra args for `~scipy.spatial.cKDTree` query_args : dict, None Optional extra args for `~scipy.spatial.cKDTree.query_ball_tree` Returns ------- join_func : function Function that accepts two columns (col1, col2) and returns the tuple (ids1, ids2) of pair-matched unique identifiers. Examples -------- >>> from astropy.table import Table, join_distance >>> from astropy import table >>> c1 = [0, 1, 1.1, 2] >>> c2 = [0.5, 1.05, 2.1] >>> t1 = Table([c1], names=['col']) >>> t2 = Table([c2], names=['col']) >>> t12 = table.join(t1, t2, join_type='outer', join_funcs={'col': join_distance(0.2)}) >>> print(t12) col_id col_1 col_2 ------ ----- ----- 1 1.0 1.05 1 1.1 1.05 2 2.0 2.1 3 0.0 -- 4 -- 0.5 """ try: from scipy.spatial import cKDTree except ImportError as exc: raise ImportError('scipy is required to use join_distance()') from exc if kdtree_args is None: kdtree_args = {} if query_args is None: query_args = {} def join_func(col1, col2): if col1.ndim > 2 or col2.ndim > 2: raise ValueError('columns for join_distance must be 1- or 2-dimensional') if isinstance(distance, Quantity): # Convert to np.array with common unit col1 = col1.to_value(distance.unit) col2 = col2.to_value(distance.unit) dist = distance.value else: # Convert to np.array to allow later in-place shape changing col1 = np.asarray(col1) col2 = np.asarray(col2) dist = distance # Ensure columns are pure np.array and are 2-D for use with KDTree if col1.ndim == 1: col1.shape = col1.shape + (1,) if col2.ndim == 1: col2.shape = col2.shape + (1,) # Cross-match col1 and col2 within dist using KDTree kd1 = cKDTree(col1, **kdtree_args) kd2 = cKDTree(col2, **kdtree_args) nears = kd1.query_ball_tree(kd2, r=dist, **query_args) # Output of above is nears which is a list of lists, where the outer # list corresponds to each item in col1, and where the inner lists are # indexes into col2 of elements within the distance tolerance. This # identifies col1 / col2 near pairs. # Now convert that into unique identifiers for each near-pair. This is # taken to be transitive, so that if points 1 and 2 are "near" and points # 1 and 3 are "near", then 1, 2, and 3 are all given the same identifier. # This identifier will then be used in the table join matching. # Identifiers for each column, initialized to all zero. ids1 = np.zeros(len(col1), dtype=int) ids2 = np.zeros(len(col2), dtype=int) # Start the identifier count at 1 id_ = 1 for idx1, idxs2 in enumerate(nears): for idx2 in idxs2: # If this col1 point is previously identified then set corresponding # col2 point to same identifier. Likewise for col2 and col1. if ids1[idx1] > 0: ids2[idx2] = ids1[idx1] elif ids2[idx2] > 0: ids1[idx1] = ids2[idx2] else: # Not yet seen so set identifier for col1 and col2 ids1[idx1] = id_ ids2[idx2] = id_ id_ += 1 # Fill in unique identifiers for points with no near neighbor for ids in (ids1, ids2): for idx in np.flatnonzero(ids == 0): ids[idx] = id_ id_ += 1 # End of enclosure join_func() return ids1, ids2 return join_func def join(left, right, keys=None, join_type='inner', *, keys_left=None, keys_right=None, uniq_col_name='{col_name}_{table_name}', table_names=['1', '2'], metadata_conflicts='warn', join_funcs=None): """ Perform a join of the left table with the right table on specified keys. Parameters ---------- left : `~astropy.table.Table`-like object Left side table in the join.
If not a Table, will call ``Table(left)`` right : `~astropy.table.Table`-like object Right side table in the join. If not a Table, will call ``Table(right)`` keys : str or list of str Name(s) of column(s) used to match rows of left and right tables. Default is to use all columns which are common to both tables. join_type : str Join type ('inner' | 'outer' | 'left' | 'right' | 'cartesian'), default is 'inner' keys_left : str or list of str or list of column-like, optional Left column(s) used to match rows instead of ``keys`` arg. This can be be a single left table column name or list of column names, or a list of column-like values with the same lengths as the left table. keys_right : str or list of str or list of column-like, optional Same as ``keys_left``, but for the right side of the join. uniq_col_name : str or None String generate a unique output column name in case of a conflict. The default is '{col_name}_{table_name}'. table_names : list of str or None Two-element list of table names used when generating unique output column names. The default is ['1', '2']. metadata_conflicts : str How to proceed with metadata conflicts. This should be one of: * ``'silent'``: silently pick the last conflicting meta-data value * ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default) * ``'error'``: raise an exception. join_funcs : dict, None Dict of functions to use for matching the corresponding key column(s). See `~astropy.table.join_skycoord` for an example and details. Returns ------- joined_table : `~astropy.table.Table` object New table containing the result of the join operation. """ # Try converting inputs to Table as needed if not isinstance(left, Table): left = Table(left) if not isinstance(right, Table): right = Table(right) col_name_map = OrderedDict() out = _join(left, right, keys, join_type, uniq_col_name, table_names, col_name_map, metadata_conflicts, join_funcs, keys_left=keys_left, keys_right=keys_right) # Merge the column and table meta data. Table subclasses might override # these methods for custom merge behavior. _merge_table_meta(out, [left, right], metadata_conflicts=metadata_conflicts) return out def setdiff(table1, table2, keys=None): """ Take a set difference of table rows. The row set difference will contain all rows in ``table1`` that are not present in ``table2``. If the keys parameter is not defined, all columns in ``table1`` will be included in the output table. Parameters ---------- table1 : `~astropy.table.Table` ``table1`` is on the left side of the set difference. table2 : `~astropy.table.Table` ``table2`` is on the right side of the set difference. keys : str or list of str Name(s) of column(s) used to match rows of left and right tables. Default is to use all columns in ``table1``. Returns ------- diff_table : `~astropy.table.Table` New table containing the set difference between tables. If the set difference is none, an empty table will be returned. 
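Notes
-----
All ``keys`` columns must be present in both ``table1`` and ``table2``,
otherwise a `ValueError` is raised.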
Examples -------- To get a set difference between two tables:: >>> from astropy.table import setdiff, Table >>> t1 = Table({'a': [1, 4, 9], 'b': ['c', 'd', 'f']}, names=('a', 'b')) >>> t2 = Table({'a': [1, 5, 9], 'b': ['c', 'b', 'f']}, names=('a', 'b')) >>> print(t1) a b --- --- 1 c 4 d 9 f >>> print(t2) a b --- --- 1 c 5 b 9 f >>> print(setdiff(t1, t2)) a b --- --- 4 d >>> print(setdiff(t2, t1)) a b --- --- 5 b """ if keys is None: keys = table1.colnames # Check that all keys are in table1 and table2 for tbl, tbl_str in ((table1, 'table1'), (table2, 'table2')): diff_keys = np.setdiff1d(keys, tbl.colnames) if len(diff_keys) != 0: raise ValueError("The {} columns are missing from {}, cannot take " "a set difference.".format(diff_keys, tbl_str)) # Make a light internal copy of both tables t1 = table1.copy(copy_data=False) t1.meta = {} t1.keep_columns(keys) t1['__index1__'] = np.arange(len(table1)) # Keep track of rows indices # Make a light internal copy to avoid touching table2 t2 = table2.copy(copy_data=False) t2.meta = {} t2.keep_columns(keys) # Dummy column to recover rows after join t2['__index2__'] = np.zeros(len(t2), dtype=np.uint8) # dummy column t12 = _join(t1, t2, join_type='left', keys=keys, metadata_conflicts='silent') # If t12 index2 is masked then that means some rows were in table1 but not table2. if hasattr(t12['__index2__'], 'mask'): # Define bool mask of table1 rows not in table2 diff = t12['__index2__'].mask # Get the row indices of table1 for those rows idx = t12['__index1__'][diff] # Select corresponding table1 rows straight from table1 to ensure # correct table and column types. t12_diff = table1[idx] else: t12_diff = table1[[]] return t12_diff def dstack(tables, join_type='outer', metadata_conflicts='warn'): """ Stack columns within tables depth-wise A ``join_type`` of 'exact' means that the tables must all have exactly the same column names (though the order can vary). If ``join_type`` is 'inner' then the intersection of common columns will be the output. A value of 'outer' (default) means the output will have the union of all columns, with table values being masked where no common values are available. Parameters ---------- tables : `~astropy.table.Table` or `~astropy.table.Row` or list thereof Table(s) to stack along depth-wise with the current table Table columns should have same shape and name for depth-wise stacking join_type : str Join type ('inner' | 'exact' | 'outer'), default is 'outer' metadata_conflicts : str How to proceed with metadata conflicts. This should be one of: * ``'silent'``: silently pick the last conflicting meta-data value * ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default) * ``'error'``: raise an exception. Returns ------- stacked_table : `~astropy.table.Table` object New table containing the stacked data from the input tables. Examples -------- To stack two tables along rows do:: >>> from astropy.table import dstack, Table >>> t1 = Table({'a': [1., 2.], 'b': [3., 4.]}, names=('a', 'b')) >>> t2 = Table({'a': [5., 6.], 'b': [7., 8.]}, names=('a', 'b')) >>> print(t1) a b --- --- 1.0 3.0 2.0 4.0 >>> print(t2) a b --- --- 5.0 7.0 6.0 8.0 >>> print(dstack([t1, t2])) a b ---------- ---------- 1.0 .. 5.0 3.0 .. 7.0 2.0 .. 6.0 4.0 .. 
8.0 """ _check_join_type(join_type, 'dstack') tables = _get_list_of_tables(tables) if len(tables) == 1: return tables[0] # no point in stacking a single table n_rows = set(len(table) for table in tables) if len(n_rows) != 1: raise ValueError('Table lengths must all match for dstack') n_row = n_rows.pop() out = vstack(tables, join_type, metadata_conflicts) for name, col in out.columns.items(): col = out[name] # Reshape to so each original column is now in a row. # If entries are not 0-dim then those additional shape dims # are just carried along. # [x x x y y y] => [[x x x], # [y y y]] new_shape = (len(tables), n_row) + col.shape[1:] try: col.shape = (len(tables), n_row) + col.shape[1:] except AttributeError: col = col.reshape(new_shape) # Transpose the table and row axes to get to # [[x, y], # [x, y] # [x, y]] axes = np.arange(len(col.shape)) axes[:2] = [1, 0] # This temporarily makes `out` be corrupted (columns of different # length) but it all works out in the end. out.columns.__setitem__(name, col.transpose(axes), validated=True) return out def vstack(tables, join_type='outer', metadata_conflicts='warn'): """ Stack tables vertically (along rows) A ``join_type`` of 'exact' means that the tables must all have exactly the same column names (though the order can vary). If ``join_type`` is 'inner' then the intersection of common columns will be the output. A value of 'outer' (default) means the output will have the union of all columns, with table values being masked where no common values are available. Parameters ---------- tables : `~astropy.table.Table` or `~astropy.table.Row` or list thereof Table(s) to stack along rows (vertically) with the current table join_type : str Join type ('inner' | 'exact' | 'outer'), default is 'outer' metadata_conflicts : str How to proceed with metadata conflicts. This should be one of: * ``'silent'``: silently pick the last conflicting meta-data value * ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default) * ``'error'``: raise an exception. Returns ------- stacked_table : `~astropy.table.Table` object New table containing the stacked data from the input tables. Examples -------- To stack two tables along rows do:: >>> from astropy.table import vstack, Table >>> t1 = Table({'a': [1, 2], 'b': [3, 4]}, names=('a', 'b')) >>> t2 = Table({'a': [5, 6], 'b': [7, 8]}, names=('a', 'b')) >>> print(t1) a b --- --- 1 3 2 4 >>> print(t2) a b --- --- 5 7 6 8 >>> print(vstack([t1, t2])) a b --- --- 1 3 2 4 5 7 6 8 """ _check_join_type(join_type, 'vstack') tables = _get_list_of_tables(tables) # validates input if len(tables) == 1: return tables[0] # no point in stacking a single table col_name_map = OrderedDict() out = _vstack(tables, join_type, col_name_map, metadata_conflicts) # Merge table metadata _merge_table_meta(out, tables, metadata_conflicts=metadata_conflicts) return out def hstack(tables, join_type='outer', uniq_col_name='{col_name}_{table_name}', table_names=None, metadata_conflicts='warn'): """ Stack tables along columns (horizontally) A ``join_type`` of 'exact' means that the tables must all have exactly the same number of rows. If ``join_type`` is 'inner' then the intersection of rows will be the output. A value of 'outer' (default) means the output will have the union of all rows, with table values being masked where no common values are available. 
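Rows are matched purely by position (row index): for an 'outer' join the
shorter tables are padded with masked values up to the length of the longest
table.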
Parameters ---------- tables : `~astropy.table.Table` or `~astropy.table.Row` or list thereof Tables to stack along columns (horizontally) with the current table join_type : str Join type ('inner' | 'exact' | 'outer'), default is 'outer' uniq_col_name : str or None String generate a unique output column name in case of a conflict. The default is '{col_name}_{table_name}'. table_names : list of str or None Two-element list of table names used when generating unique output column names. The default is ['1', '2', ..]. metadata_conflicts : str How to proceed with metadata conflicts. This should be one of: * ``'silent'``: silently pick the last conflicting meta-data value * ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default) * ``'error'``: raise an exception. Returns ------- stacked_table : `~astropy.table.Table` object New table containing the stacked data from the input tables. See Also -------- Table.add_columns, Table.replace_column, Table.update Examples -------- To stack two tables horizontally (along columns) do:: >>> from astropy.table import Table, hstack >>> t1 = Table({'a': [1, 2], 'b': [3, 4]}, names=('a', 'b')) >>> t2 = Table({'c': [5, 6], 'd': [7, 8]}, names=('c', 'd')) >>> print(t1) a b --- --- 1 3 2 4 >>> print(t2) c d --- --- 5 7 6 8 >>> print(hstack([t1, t2])) a b c d --- --- --- --- 1 3 5 7 2 4 6 8 """ _check_join_type(join_type, 'hstack') tables = _get_list_of_tables(tables) # validates input if len(tables) == 1: return tables[0] # no point in stacking a single table col_name_map = OrderedDict() out = _hstack(tables, join_type, uniq_col_name, table_names, col_name_map) _merge_table_meta(out, tables, metadata_conflicts=metadata_conflicts) return out def unique(input_table, keys=None, silent=False, keep='first'): """ Returns the unique rows of a table. Parameters ---------- input_table : table-like keys : str or list of str Name(s) of column(s) used to create unique rows. Default is to use all columns. keep : {'first', 'last', 'none'} Whether to keep the first or last row for each set of duplicates. If 'none', all rows that are duplicate are removed, leaving only rows that are already unique in the input. Default is 'first'. silent : bool If `True`, masked value column(s) are silently removed from ``keys``. If `False`, an exception is raised when ``keys`` contains masked value column(s). Default is `False`. Returns ------- unique_table : `~astropy.table.Table` object New table containing only the unique rows of ``input_table``. Examples -------- >>> from astropy.table import unique, Table >>> import numpy as np >>> table = Table(data=[[1,2,3,2,3,3], ... [2,3,4,5,4,6], ... [3,4,5,6,7,8]], ... names=['col1', 'col2', 'col3'], ... 
dtype=[np.int32, np.int32, np.int32]) >>> table <Table length=6> col1 col2 col3 int32 int32 int32 ----- ----- ----- 1 2 3 2 3 4 3 4 5 2 5 6 3 4 7 3 6 8 >>> unique(table, keys='col1') <Table length=3> col1 col2 col3 int32 int32 int32 ----- ----- ----- 1 2 3 2 3 4 3 4 5 >>> unique(table, keys=['col1'], keep='last') <Table length=3> col1 col2 col3 int32 int32 int32 ----- ----- ----- 1 2 3 2 5 6 3 6 8 >>> unique(table, keys=['col1', 'col2']) <Table length=5> col1 col2 col3 int32 int32 int32 ----- ----- ----- 1 2 3 2 3 4 2 5 6 3 4 5 3 6 8 >>> unique(table, keys=['col1', 'col2'], keep='none') <Table length=4> col1 col2 col3 int32 int32 int32 ----- ----- ----- 1 2 3 2 3 4 2 5 6 3 6 8 >>> unique(table, keys=['col1'], keep='none') <Table length=1> col1 col2 col3 int32 int32 int32 ----- ----- ----- 1 2 3 """ if keep not in ('first', 'last', 'none'): raise ValueError("'keep' should be one of 'first', 'last', 'none'") if isinstance(keys, str): keys = [keys] if keys is None: keys = input_table.colnames else: if len(set(keys)) != len(keys): raise ValueError("duplicate key names") # Check for columns with masked values for key in keys[:]: col = input_table[key] if hasattr(col, 'mask') and np.any(col.mask): if not silent: raise ValueError( "cannot use columns with masked values as keys; " "remove column '{}' from keys and rerun " "unique()".format(key)) del keys[keys.index(key)] if len(keys) == 0: raise ValueError("no column remained in ``keys``; " "unique() cannot work with masked value " "key columns") grouped_table = input_table.group_by(keys) indices = grouped_table.groups.indices if keep == 'first': indices = indices[:-1] elif keep == 'last': indices = indices[1:] - 1 else: indices = indices[:-1][np.diff(indices) == 1] return grouped_table[indices] def get_col_name_map(arrays, common_names, uniq_col_name='{col_name}_{table_name}', table_names=None): """ Find the column names mapping when merging the list of tables ``arrays``. It is assumed that col names in ``common_names`` are to be merged into a single column while the rest will be uniquely represented in the output. The args ``uniq_col_name`` and ``table_names`` specify how to rename columns in case of conflicts. Returns a dict mapping each output column name to the input(s). This takes the form {outname : (col_name_0, col_name_1, ...), ... }. For key columns all of input names will be present, while for the other non-key columns the value will be (col_name_0, None, ..) or (None, col_name_1, ..) etc. """ col_name_map = collections.defaultdict(lambda: [None] * len(arrays)) col_name_list = [] if table_names is None: table_names = [str(ii + 1) for ii in range(len(arrays))] for idx, array in enumerate(arrays): table_name = table_names[idx] for name in array.colnames: out_name = name if name in common_names: # If name is in the list of common_names then insert into # the column name list, but just once. 
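# (E.g. a shared key column 'a' yields a single output column 'a', while a
# non-key collision 'b' becomes 'b_1' and 'b_2' under the default
# uniq_col_name and table_names.)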
if name not in col_name_list: col_name_list.append(name) else: # If name is not one of the common column outputs, and it collides # with the names in one of the other arrays, then rename others = list(arrays) others.pop(idx) if any(name in other.colnames for other in others): out_name = uniq_col_name.format(table_name=table_name, col_name=name) col_name_list.append(out_name) col_name_map[out_name][idx] = name # Check for duplicate output column names col_name_count = Counter(col_name_list) repeated_names = [name for name, count in col_name_count.items() if count > 1] if repeated_names: raise TableMergeError('Merging column names resulted in duplicates: {}. ' 'Change uniq_col_name or table_names args to fix this.' .format(repeated_names)) # Convert col_name_map to a regular dict with tuple (immutable) values col_name_map = OrderedDict((name, col_name_map[name]) for name in col_name_list) return col_name_map def get_descrs(arrays, col_name_map): """ Find the dtypes descrs resulting from merging the list of arrays' dtypes, using the column name mapping ``col_name_map``. Return a list of descrs for the output. """ out_descrs = [] for out_name, in_names in col_name_map.items(): # List of input arrays that contribute to this output column in_cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None] # List of names of the columns that contribute to this output column. names = [name for name in in_names if name is not None] # Output dtype is the superset of all dtypes in in_arrays try: dtype = common_dtype(in_cols) except TableMergeError as tme: # Beautify the error message when we are trying to merge columns with incompatible # types by including the name of the columns that originated the error. raise TableMergeError("The '{}' columns have incompatible types: {}" .format(names[0], tme._incompat_types)) from tme # Make sure all input shapes are the same uniq_shapes = set(col.shape[1:] for col in in_cols) if len(uniq_shapes) != 1: raise TableMergeError(f'Key columns {names!r} have different shape') shape = uniq_shapes.pop() if out_name is not None: out_name = str(out_name) out_descrs.append((out_name, dtype, shape)) return out_descrs def common_dtype(cols): """ Use numpy to find the common dtype for a list of columns. Only allow columns within the following fundamental numpy data types: np.bool_, np.object_, np.number, np.character, np.void """ try: return metadata.common_dtype(cols) except metadata.MergeConflictError as err: tme = TableMergeError(f'Columns have incompatible types {err._incompat_types}') tme._incompat_types = err._incompat_types raise tme from err def _get_join_sort_idxs(keys, left, right): # Go through each of the key columns in order and make columns for # a new structured array that represents the lexical ordering of those # key columns. This structured array is then argsort'ed. The trick here # is that some columns (e.g. Time) may need to be expanded into multiple # columns for ordering here. ii = 0 # Index for uniquely naming the sort columns sort_keys_dtypes = [] # sortable_table dtypes as list of (name, dtype_str, shape) tuples sort_keys = [] # sortable_table (structured ndarray) column names sort_left = {} # sortable ndarrays from left table sort_right = {} # sortable ndarray from right table for key in keys: # get_sortable_arrays() returns a list of ndarrays that can be lexically # sorted to represent the order of the column. In most cases this is just # a single element of the column itself. 
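# Each such array is assigned a unique field name ('0', '1', ...) in the
# structured array built below, so a single argsort(order=sort_keys) gives
# the lexical ordering over all key columns at once.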
left_sort_cols = left[key].info.get_sortable_arrays() right_sort_cols = right[key].info.get_sortable_arrays() if len(left_sort_cols) != len(right_sort_cols): # Should never happen because cols are screened beforehand for compatibility raise RuntimeError('mismatch in sort cols lengths') for left_sort_col, right_sort_col in zip(left_sort_cols, right_sort_cols): # Check for consistency of shapes. Mismatch should never happen. shape = left_sort_col.shape[1:] if shape != right_sort_col.shape[1:]: raise RuntimeError('mismatch in shape of left vs. right sort array') if shape != (): raise ValueError(f'sort key column {key!r} must be 1-d') sort_key = str(ii) sort_keys.append(sort_key) sort_left[sort_key] = left_sort_col sort_right[sort_key] = right_sort_col # Build up dtypes for the structured array that gets sorted. dtype_str = common_dtype([left_sort_col, right_sort_col]) sort_keys_dtypes.append((sort_key, dtype_str)) ii += 1 # Make the empty sortable table and fill it len_left = len(left) sortable_table = np.empty(len_left + len(right), dtype=sort_keys_dtypes) for key in sort_keys: sortable_table[key][:len_left] = sort_left[key] sortable_table[key][len_left:] = sort_right[key] # Finally do the (lexical) argsort and make a new sorted version idx_sort = sortable_table.argsort(order=sort_keys) sorted_table = sortable_table[idx_sort] # Get indexes of unique elements (i.e. the group boundaries) diffs = np.concatenate(([True], sorted_table[1:] != sorted_table[:-1], [True])) idxs = np.flatnonzero(diffs) return idxs, idx_sort def _apply_join_funcs(left, right, keys, join_funcs): """Apply join_funcs """ # Make light copies of left and right, then add new index columns. left = left.copy(copy_data=False) right = right.copy(copy_data=False) for key, join_func in join_funcs.items(): ids1, ids2 = join_func(left[key], right[key]) # Define a unique id_key name, and keep adding underscores until we have # a name not yet present. id_key = key + '_id' while id_key in left.columns or id_key in right.columns: id_key = id_key[:-2] + '_id' keys = tuple(id_key if orig_key == key else orig_key for orig_key in keys) left.add_column(ids1, index=0, name=id_key) # [id_key] = ids1 right.add_column(ids2, index=0, name=id_key) # [id_key] = ids2 return left, right, keys def _join(left, right, keys=None, join_type='inner', uniq_col_name='{col_name}_{table_name}', table_names=['1', '2'], col_name_map=None, metadata_conflicts='warn', join_funcs=None, keys_left=None, keys_right=None): """ Perform a join of the left and right Tables on specified keys. Parameters ---------- left : Table Left side table in the join right : Table Right side table in the join keys : str or list of str Name(s) of column(s) used to match rows of left and right tables. Default is to use all columns which are common to both tables. join_type : str Join type ('inner' | 'outer' | 'left' | 'right' | 'cartesian'), default is 'inner' uniq_col_name : str or None String generate a unique output column name in case of a conflict. The default is '{col_name}_{table_name}'. table_names : list of str or None Two-element list of table names used when generating unique output column names. The default is ['1', '2']. col_name_map : empty dict or None If passed as a dict then it will be updated in-place with the mapping of output to input column names. metadata_conflicts : str How to proceed with metadata conflicts. 
This should be one of: * ``'silent'``: silently pick the last conflicting meta-data value * ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default) * ``'error'``: raise an exception. join_funcs : dict, None Dict of functions to use for matching the corresponding key column(s). See `~astropy.table.join_skycoord` for an example and details. Returns ------- joined_table : `~astropy.table.Table` object New table containing the result of the join operation. """ # Store user-provided col_name_map until the end _col_name_map = col_name_map # Special column name for cartesian join, should never collide with real column cartesian_index_name = '__table_cartesian_join_temp_index__' if join_type not in ('inner', 'outer', 'left', 'right', 'cartesian'): raise ValueError("The 'join_type' argument should be in 'inner', " "'outer', 'left', 'right', or 'cartesian' " "(got '{}' instead)". format(join_type)) if join_type == 'cartesian': if keys: raise ValueError('cannot supply keys for a cartesian join') if join_funcs: raise ValueError('cannot supply join_funcs for a cartesian join') # Make light copies of left and right, then add temporary index columns # with all the same value so later an outer join turns into a cartesian join. left = left.copy(copy_data=False) right = right.copy(copy_data=False) left[cartesian_index_name] = np.uint8(0) right[cartesian_index_name] = np.uint8(0) keys = (cartesian_index_name, ) # Handle the case of join key columns that are different between left and # right via keys_left/keys_right args. This is done by saving the original # input tables and making new left and right tables that contain only the # key cols but with common column names ['0', '1', etc]. This sets `keys` to # those fake key names in the left and right tables if keys_left is not None or keys_right is not None: left_orig = left right_orig = right left, right, keys = _join_keys_left_right( left, right, keys, keys_left, keys_right, join_funcs) if keys is None: keys = tuple(name for name in left.colnames if name in right.colnames) if len(keys) == 0: raise TableMergeError('No keys in common between left and right tables') elif isinstance(keys, str): # If we have a single key, put it in a tuple keys = (keys,) # Check the key columns for arr, arr_label in ((left, 'Left'), (right, 'Right')): for name in keys: if name not in arr.colnames: raise TableMergeError('{} table does not have key column {!r}' .format(arr_label, name)) if hasattr(arr[name], 'mask') and np.any(arr[name].mask): raise TableMergeError('{} key column {!r} has missing values' .format(arr_label, name)) if join_funcs is not None: if not all(key in keys for key in join_funcs): raise ValueError(f'join_funcs keys {join_funcs.keys()} must be a ' f'subset of join keys {keys}') left, right, keys = _apply_join_funcs(left, right, keys, join_funcs) len_left, len_right = len(left), len(right) if len_left == 0 or len_right == 0: raise ValueError('input tables for join must both have at least one row') try: idxs, idx_sort = _get_join_sort_idxs(keys, left, right) except NotImplementedError: raise TypeError('one or more key columns are not sortable') # Now that we have idxs and idx_sort, revert to the original table args to # carry on with making the output joined table. `keys` is set to to an empty # list so that all original left and right columns are included in the # output table. 
if keys_left is not None or keys_right is not None: keys = [] left = left_orig right = right_orig # Joined array dtype as a list of descr (name, type_str, shape) tuples col_name_map = get_col_name_map([left, right], keys, uniq_col_name, table_names) out_descrs = get_descrs([left, right], col_name_map) # Main inner loop in Cython to compute the cartesian product # indices for the given join type int_join_type = {'inner': 0, 'outer': 1, 'left': 2, 'right': 3, 'cartesian': 1}[join_type] masked, n_out, left_out, left_mask, right_out, right_mask = \ _np_utils.join_inner(idxs, idx_sort, len_left, int_join_type) out = _get_out_class([left, right])() for out_name, dtype, shape in out_descrs: if out_name == cartesian_index_name: continue left_name, right_name = col_name_map[out_name] if left_name and right_name: # this is a key which comes from left and right cols = [left[left_name], right[right_name]] col_cls = _get_out_class(cols) if not hasattr(col_cls.info, 'new_like'): raise NotImplementedError('join unavailable for mixin column type(s): {}' .format(col_cls.__name__)) out[out_name] = col_cls.info.new_like(cols, n_out, metadata_conflicts, out_name) out[out_name][:] = np.where(right_mask, left[left_name].take(left_out), right[right_name].take(right_out)) continue elif left_name: # out_name came from the left table name, array, array_out, array_mask = left_name, left, left_out, left_mask elif right_name: name, array, array_out, array_mask = right_name, right, right_out, right_mask else: raise TableMergeError('Unexpected column names (maybe one is ""?)') # Select the correct elements from the original table col = array[name][array_out] # If the output column is masked then set the output column masking # accordingly. Check for columns that don't support a mask attribute. if masked and np.any(array_mask): # If col is a Column but not MaskedColumn then upgrade at this point # because masking is required. if isinstance(col, Column) and not isinstance(col, MaskedColumn): col = out.MaskedColumn(col, copy=False) if isinstance(col, Quantity) and not isinstance(col, Masked): col = Masked(col, copy=False) # array_mask is 1-d corresponding to length of output column. We need # make it have the correct shape for broadcasting, i.e. (length, 1, 1, ..). # Mixin columns might not have ndim attribute so use len(col.shape). array_mask.shape = (col.shape[0],) + (1,) * (len(col.shape) - 1) # Now broadcast to the correct final shape array_mask = np.broadcast_to(array_mask, col.shape) try: col[array_mask] = col.info.mask_val except Exception as err: # Not clear how different classes will fail here raise NotImplementedError( "join requires masking column '{}' but column" " type {} does not support masking" .format(out_name, col.__class__.__name__)) from err # Set the output table column to the new joined column out[out_name] = col # If col_name_map supplied as a dict input, then update. if isinstance(_col_name_map, Mapping): _col_name_map.update(col_name_map) return out def _join_keys_left_right(left, right, keys, keys_left, keys_right, join_funcs): """Do processing to handle keys_left / keys_right args for join. This takes the keys_left/right inputs and turns them into a list of left/right columns corresponding to those inputs (which can be column names or column data values). It also generates the list of fake key column names (strings of "1", "2", etc.) that correspond to the input keys. 
""" def _keys_to_cols(keys, table, label): # Process input `keys`, which is a str or list of str column names in # `table` or a list of column-like objects. The `label` is just for # error reporting. if isinstance(keys, str): keys = [keys] cols = [] for key in keys: if isinstance(key, str): try: cols.append(table[key]) except KeyError: raise ValueError(f'{label} table does not have key column {key!r}') else: if len(key) != len(table): raise ValueError(f'{label} table has different length from key {key}') cols.append(key) return cols if join_funcs is not None: raise ValueError('cannot supply join_funcs arg and keys_left / keys_right') if keys_left is None or keys_right is None: raise ValueError('keys_left and keys_right must both be provided') if keys is not None: raise ValueError('keys arg must be None if keys_left and keys_right are supplied') cols_left = _keys_to_cols(keys_left, left, 'left') cols_right = _keys_to_cols(keys_right, right, 'right') if len(cols_left) != len(cols_right): raise ValueError('keys_left and keys_right args must have same length') # Make two new temp tables for the join with only the join columns and # key columns in common. keys = [f'{ii}' for ii in range(len(cols_left))] left = left.__class__(cols_left, names=keys, copy=False) right = right.__class__(cols_right, names=keys, copy=False) return left, right, keys def _check_join_type(join_type, func_name): """Check join_type arg in hstack and vstack. This specifically checks for the common mistake of call vstack(t1, t2) instead of vstack([t1, t2]). The subsequent check of ``join_type in ('inner', ..)`` does not raise in this case. """ if not isinstance(join_type, str): msg = '`join_type` arg must be a string' if isinstance(join_type, Table): msg += ('. Did you accidentally ' f'call {func_name}(t1, t2, ..) instead of ' f'{func_name}([t1, t2], ..)?') raise TypeError(msg) if join_type not in ('inner', 'exact', 'outer'): raise ValueError("`join_type` arg must be one of 'inner', 'exact' or 'outer'") def _vstack(arrays, join_type='outer', col_name_map=None, metadata_conflicts='warn'): """ Stack Tables vertically (by rows) A ``join_type`` of 'exact' (default) means that the arrays must all have exactly the same column names (though the order can vary). If ``join_type`` is 'inner' then the intersection of common columns will be the output. A value of 'outer' means the output will have the union of all columns, with array values being masked where no common values are available. Parameters ---------- arrays : list of Tables Tables to stack by rows (vertically) join_type : str Join type ('inner' | 'exact' | 'outer'), default is 'outer' col_name_map : empty dict or None If passed as a dict then it will be updated in-place with the mapping of output to input column names. Returns ------- stacked_table : `~astropy.table.Table` object New table containing the stacked data from the input tables. 
""" # Store user-provided col_name_map until the end _col_name_map = col_name_map # Trivial case of one input array if len(arrays) == 1: return arrays[0] # Start by assuming an outer match where all names go to output names = set(itertools.chain(*[arr.colnames for arr in arrays])) col_name_map = get_col_name_map(arrays, names) # If require_match is True then the output must have exactly the same # number of columns as each input array if join_type == 'exact': for names in col_name_map.values(): if any(x is None for x in names): raise TableMergeError('Inconsistent columns in input arrays ' "(use 'inner' or 'outer' join_type to " "allow non-matching columns)") join_type = 'outer' # For an inner join, keep only columns where all input arrays have that column if join_type == 'inner': col_name_map = OrderedDict((name, in_names) for name, in_names in col_name_map.items() if all(x is not None for x in in_names)) if len(col_name_map) == 0: raise TableMergeError('Input arrays have no columns in common') lens = [len(arr) for arr in arrays] n_rows = sum(lens) out = _get_out_class(arrays)() for out_name, in_names in col_name_map.items(): # List of input arrays that contribute to this output column cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None] col_cls = _get_out_class(cols) if not hasattr(col_cls.info, 'new_like'): raise NotImplementedError('vstack unavailable for mixin column type(s): {}' .format(col_cls.__name__)) try: col = col_cls.info.new_like(cols, n_rows, metadata_conflicts, out_name) except metadata.MergeConflictError as err: # Beautify the error message when we are trying to merge columns with incompatible # types by including the name of the columns that originated the error. raise TableMergeError("The '{}' columns have incompatible types: {}" .format(out_name, err._incompat_types)) from err idx0 = 0 for name, array in zip(in_names, arrays): idx1 = idx0 + len(array) if name in array.colnames: col[idx0:idx1] = array[name] else: # If col is a Column but not MaskedColumn then upgrade at this point # because masking is required. if isinstance(col, Column) and not isinstance(col, MaskedColumn): col = out.MaskedColumn(col, copy=False) if isinstance(col, Quantity) and not isinstance(col, Masked): col = Masked(col, copy=False) try: col[idx0:idx1] = col.info.mask_val except Exception as err: raise NotImplementedError( "vstack requires masking column '{}' but column" " type {} does not support masking" .format(out_name, col.__class__.__name__)) from err idx0 = idx1 out[out_name] = col # If col_name_map supplied as a dict input, then update. if isinstance(_col_name_map, Mapping): _col_name_map.update(col_name_map) return out def _hstack(arrays, join_type='outer', uniq_col_name='{col_name}_{table_name}', table_names=None, col_name_map=None): """ Stack tables horizontally (by columns) A ``join_type`` of 'exact' (default) means that the arrays must all have exactly the same number of rows. If ``join_type`` is 'inner' then the intersection of rows will be the output. A value of 'outer' means the output will have the union of all rows, with array values being masked where no common values are available. Parameters ---------- arrays : List of tables Tables to stack by columns (horizontally) join_type : str Join type ('inner' | 'exact' | 'outer'), default is 'outer' uniq_col_name : str or None String generate a unique output column name in case of a conflict. The default is '{col_name}_{table_name}'. 
table_names : list of str or None Two-element list of table names used when generating unique output column names. The default is ['1', '2', ..]. Returns ------- stacked_table : `~astropy.table.Table` object New table containing the stacked data from the input tables. """ # Store user-provided col_name_map until the end _col_name_map = col_name_map if table_names is None: table_names = [f'{ii + 1}' for ii in range(len(arrays))] if len(arrays) != len(table_names): raise ValueError('Number of arrays must match number of table_names') # Trivial case of one input arrays if len(arrays) == 1: return arrays[0] col_name_map = get_col_name_map(arrays, [], uniq_col_name, table_names) # If require_match is True then all input arrays must have the same length arr_lens = [len(arr) for arr in arrays] if join_type == 'exact': if len(set(arr_lens)) > 1: raise TableMergeError("Inconsistent number of rows in input arrays " "(use 'inner' or 'outer' join_type to allow " "non-matching rows)") join_type = 'outer' # For an inner join, keep only the common rows if join_type == 'inner': min_arr_len = min(arr_lens) if len(set(arr_lens)) > 1: arrays = [arr[:min_arr_len] for arr in arrays] arr_lens = [min_arr_len for arr in arrays] # If there are any output rows where one or more input arrays are missing # then the output must be masked. If any input arrays are masked then # output is masked. n_rows = max(arr_lens) out = _get_out_class(arrays)() for out_name, in_names in col_name_map.items(): for name, array, arr_len in zip(in_names, arrays, arr_lens): if name is None: continue if n_rows > arr_len: indices = np.arange(n_rows) indices[arr_len:] = 0 col = array[name][indices] # If col is a Column but not MaskedColumn then upgrade at this point # because masking is required. if isinstance(col, Column) and not isinstance(col, MaskedColumn): col = out.MaskedColumn(col, copy=False) if isinstance(col, Quantity) and not isinstance(col, Masked): col = Masked(col, copy=False) try: col[arr_len:] = col.info.mask_val except Exception as err: raise NotImplementedError( "hstack requires masking column '{}' but column" " type {} does not support masking" .format(out_name, col.__class__.__name__)) from err else: col = array[name][:n_rows] out[out_name] = col # If col_name_map supplied as a dict input, then update. if isinstance(_col_name_map, Mapping): _col_name_map.update(col_name_map) return out
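# Illustrative sketch (not part of the module): joining on differently named
# key columns via the keys_left / keys_right args documented in join() above.
# The table names and values below are made up for demonstration.
#
#     from astropy.table import Table, join
#
#     t1 = Table({'src_id': [1, 2, 3], 'flux': [10., 20., 30.]})
#     t2 = Table({'obj_id': [2, 3, 4], 'mag': [15.1, 14.7, 13.9]})
#     t12 = join(t1, t2, keys_left='src_id', keys_right='obj_id')
#
# Because the key columns have different names, both are retained in the
# output, which holds the rows where src_id == obj_id (here 2 and 3).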
# Licensed under a 3-clause BSD style license - see LICENSE.rst from importlib import import_module from copy import deepcopy from collections import OrderedDict import numpy as np from astropy.utils.data_info import MixinInfo from .column import Column, MaskedColumn from .table import Table, QTable, has_info_class from astropy.units.quantity import QuantityInfo # TODO: some of this might be better done programmatically, through # code like # __construct_mixin_classes += tuple( # f'astropy.coordinates.representation.{cls.__name__}' # for cls in (list(coorep.REPRESENTATION_CLASSES.values()) # + list(coorep.DIFFERENTIAL_CLASSES.values())) # if cls.__name__ in coorep.__all__) # However, to avoid very hard to track import issues, the definition # should then be done at the point where it is actually needed, # using local imports. See also # https://github.com/astropy/astropy/pull/10210#discussion_r419087286 __construct_mixin_classes = ( 'astropy.time.core.Time', 'astropy.time.core.TimeDelta', 'astropy.units.quantity.Quantity', 'astropy.units.function.logarithmic.Magnitude', 'astropy.units.function.logarithmic.Decibel', 'astropy.units.function.logarithmic.Dex', 'astropy.coordinates.angles.Latitude', 'astropy.coordinates.angles.Longitude', 'astropy.coordinates.angles.Angle', 'astropy.coordinates.distances.Distance', 'astropy.coordinates.earth.EarthLocation', 'astropy.coordinates.sky_coordinate.SkyCoord', 'astropy.table.ndarray_mixin.NdarrayMixin', 'astropy.table.table_helpers.ArrayWrapper', 'astropy.table.column.Column', 'astropy.table.column.MaskedColumn', 'astropy.coordinates.representation.CartesianRepresentation', 'astropy.coordinates.representation.UnitSphericalRepresentation', 'astropy.coordinates.representation.RadialRepresentation', 'astropy.coordinates.representation.SphericalRepresentation', 'astropy.coordinates.representation.PhysicsSphericalRepresentation', 'astropy.coordinates.representation.CylindricalRepresentation', 'astropy.coordinates.representation.CartesianDifferential', 'astropy.coordinates.representation.UnitSphericalDifferential', 'astropy.coordinates.representation.SphericalDifferential', 'astropy.coordinates.representation.UnitSphericalCosLatDifferential', 'astropy.coordinates.representation.SphericalCosLatDifferential', 'astropy.coordinates.representation.RadialDifferential', 'astropy.coordinates.representation.PhysicsSphericalDifferential', 'astropy.coordinates.representation.CylindricalDifferential', 'astropy.utils.masked.core.MaskedNDArray', ) class SerializedColumnInfo(MixinInfo): """ Minimal info to allow SerializedColumn to be recognized as a mixin Column. Used to help create a dict of columns in ColumnInfo for structured data. """ def _represent_as_dict(self): # SerializedColumn is already a `dict`, so we can return it directly. return self._parent class SerializedColumn(dict): """Subclass of dict used to serialize mixin columns. It is used in the representation to contain the name and possible other info for a mixin column or attribute (either primary data or an array-like attribute) that is serialized as a column in the table. """ info = SerializedColumnInfo() @property def shape(self): """Minimal shape implementation to allow use as a mixin column. Returns the shape of the first item that has a shape at all, or ``()`` if none of the values has a shape attribute. 
""" return next((value.shape for value in self.values() if hasattr(value, 'shape')), ()) def _represent_mixin_as_column(col, name, new_cols, mixin_cols, exclude_classes=()): """Carry out processing needed to serialize ``col`` in an output table consisting purely of plain ``Column`` or ``MaskedColumn`` columns. This relies on the object determine if any transformation is required and may depend on the ``serialize_method`` and ``serialize_context`` context variables. For instance a ``MaskedColumn`` may be stored directly to FITS, but can also be serialized as separate data and mask columns. This function builds up a list of plain columns in the ``new_cols`` arg (which is passed as a persistent list). This includes both plain columns from the original table and plain columns that represent data from serialized columns (e.g. ``jd1`` and ``jd2`` arrays from a ``Time`` column). For serialized columns the ``mixin_cols`` dict is updated with required attributes and information to subsequently reconstruct the table. Table mixin columns are always serialized and get represented by one or more data columns. In earlier versions of the code *only* mixin columns were serialized, hence the use within this code of "mixin" to imply serialization. Starting with version 3.1, the non-mixin ``MaskedColumn`` can also be serialized. """ obj_attrs = col.info._represent_as_dict() # If serialization is not required (see function docstring above) # or explicitly specified as excluded, then treat as a normal column. if not obj_attrs or col.__class__ in exclude_classes: new_cols.append(col) return # Subtlety here is handling mixin info attributes. The basic list of such # attributes is: 'name', 'unit', 'dtype', 'format', 'description', 'meta'. # - name: handled directly [DON'T store] # - unit: DON'T store if this is a parent attribute # - dtype: captured in plain Column if relevant [DON'T store] # - format: possibly irrelevant but settable post-object creation [DO store] # - description: DO store # - meta: DO store info = {} for attr, nontrivial in (('unit', lambda x: x is not None and x != ''), ('format', lambda x: x is not None), ('description', lambda x: x is not None), ('meta', lambda x: x)): col_attr = getattr(col.info, attr) if nontrivial(col_attr): info[attr] = col_attr # Find column attributes that have the same length as the column itself. # These will be stored in the table as new columns (aka "data attributes"). # Examples include SkyCoord.ra (what is typically considered the data and is # always an array) and Skycoord.obs_time (which can be a scalar or an # array). data_attrs = [key for key, value in obj_attrs.items() if getattr(value, 'shape', ())[:1] == col.shape[:1]] for data_attr in data_attrs: data = obj_attrs[data_attr] # New column name combines the old name and attribute # (e.g. skycoord.ra, skycoord.dec).unless it is the primary data # attribute for the column (e.g. value for Quantity or data for # MaskedColumn). For primary data, we attempt to store any info on # the format, etc., on the column, but not for ancillary data (e.g., # no sense to use a float format for a mask). is_primary = data_attr == col.info._represent_as_dict_primary_data if is_primary: new_name = name new_info = info else: new_name = name + '.' 
+ data_attr new_info = {} if not has_info_class(data, MixinInfo): col_cls = MaskedColumn if (hasattr(data, 'mask') and np.any(data.mask)) else Column data = col_cls(data, name=new_name, **new_info) if is_primary: # Don't store info in the __serialized_columns__ dict for this column # since this is redundant with info stored on the new column. info = {} # Recurse. If this is anything that needs further serialization (i.e., # a Mixin column, a structured Column, a MaskedColumn for which mask is # stored, etc.), it will define obj_attrs[new_name]. Otherwise, it will # just add to new_cols and all we have to do is to link to the new name. _represent_mixin_as_column(data, new_name, new_cols, obj_attrs) obj_attrs[data_attr] = SerializedColumn(obj_attrs.pop(new_name, {'name': new_name})) # Strip out from info any attributes defined by the parent, # and store whatever remains. for attr in col.info.attrs_from_parent: if attr in info: del info[attr] if info: obj_attrs['__info__'] = info # Store the fully qualified class name if not isinstance(col, SerializedColumn): obj_attrs.setdefault('__class__', col.__module__ + '.' + col.__class__.__name__) mixin_cols[name] = obj_attrs def represent_mixins_as_columns(tbl, exclude_classes=()): """Represent input Table ``tbl`` using only `~astropy.table.Column` or `~astropy.table.MaskedColumn` objects. This function represents any mixin columns like `~astropy.time.Time` in ``tbl`` to one or more plain ``~astropy.table.Column`` objects and returns a new Table. A single mixin column may be split into multiple column components as needed for fully representing the column. This includes the possibility of recursive splitting, as shown in the example below. The new column names are formed as ``<column_name>.<component>``, e.g. ``sc.ra`` for a `~astropy.coordinates.SkyCoord` column named ``sc``. In addition to splitting columns, this function updates the table ``meta`` dictionary to include a dict named ``__serialized_columns__`` which provides additional information needed to construct the original mixin columns from the split columns. This function is used by astropy I/O when writing tables to ECSV, FITS, HDF5 formats. Note that if the table does not include any mixin columns then the original table is returned with no update to ``meta``. Parameters ---------- tbl : `~astropy.table.Table` or subclass Table to represent mixins as Columns exclude_classes : tuple of class Exclude any mixin columns which are instannces of any classes in the tuple Returns ------- tbl : `~astropy.table.Table` New Table with updated columns, or else the original input ``tbl`` Examples -------- >>> from astropy.table import Table, represent_mixins_as_columns >>> from astropy.time import Time >>> from astropy.coordinates import SkyCoord >>> x = [100.0, 200.0] >>> obstime = Time([1999.0, 2000.0], format='jyear') >>> sc = SkyCoord([1, 2], [3, 4], unit='deg', obstime=obstime) >>> tbl = Table([sc, x], names=['sc', 'x']) >>> represent_mixins_as_columns(tbl) <Table length=2> sc.ra sc.dec sc.obstime.jd1 sc.obstime.jd2 x deg deg float64 float64 float64 float64 float64 ------- ------- -------------- -------------- ------- 1.0 3.0 2451180.0 -0.25 100.0 2.0 4.0 2451545.0 0.0 200.0 """ # Dict of metadata for serializing each column, keyed by column name. # Gets filled in place by _represent_mixin_as_column(). mixin_cols = {} # List of columns for the output table. For plain Column objects # this will just be the original column object. 
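# (A mixin column instead contributes one or more plain columns, e.g. a
# SkyCoord column 'sc' yields 'sc.ra', 'sc.dec' and, if obstime is set,
# 'sc.obstime.jd1' / 'sc.obstime.jd2', as in the docstring example above.)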
new_cols = [] # Go through table columns and represent each column as one or more # plain Column objects (in new_cols) + metadata (in mixin_cols). for col in tbl.itercols(): _represent_mixin_as_column(col, col.info.name, new_cols, mixin_cols, exclude_classes=exclude_classes) # If no metadata was created then just return the original table. if mixin_cols: meta = deepcopy(tbl.meta) meta['__serialized_columns__'] = mixin_cols out = Table(new_cols, meta=meta, copy=False) else: out = tbl for col in out.itercols(): if not isinstance(col, Column) and col.__class__ not in exclude_classes: # This catches columns for which info has not been set up right and # therefore were not converted. See the corresponding test in # test_mixin.py for an example. raise TypeError( 'failed to represent column ' f'{col.info.name!r} ({col.__class__.__name__}) as one ' 'or more Column subclasses. This looks like a mixin class ' 'that does not have the correct _represent_as_dict() method ' 'in the class `info` attribute.') return out def _construct_mixin_from_obj_attrs_and_info(obj_attrs, info): # If this is a supported class then import the class and run # the _construct_from_col method. Prevent accidentally running # untrusted code by only importing known astropy classes. cls_full_name = obj_attrs.pop('__class__', None) if cls_full_name is None: # We're dealing with a SerializedColumn holding columns, stored in # obj_attrs. For this case, info holds the name (and nothing else). mixin = SerializedColumn(obj_attrs) mixin.info.name = info['name'] return mixin if cls_full_name not in __construct_mixin_classes: raise ValueError(f'unsupported class for construct {cls_full_name}') mod_name, _, cls_name = cls_full_name.rpartition('.') module = import_module(mod_name) cls = getattr(module, cls_name) for attr, value in info.items(): if attr in cls.info.attrs_from_parent: obj_attrs[attr] = value mixin = cls.info._construct_from_dict(obj_attrs) for attr, value in info.items(): if attr not in obj_attrs: setattr(mixin.info, attr, value) return mixin class _TableLite(OrderedDict): """ Minimal table-like object for _construct_mixin_from_columns. This allows manipulating the object like a Table but without the actual overhead for a full Table. More pressing, there is an issue with constructing MaskedColumn, where the encoded Column components (data, mask) are turned into a MaskedColumn. When this happens in a real table then all other columns are immediately Masked and a warning is issued. This is not desirable. """ def add_column(self, col, index=0): colnames = self.colnames self[col.info.name] = col for ii, name in enumerate(colnames): if ii >= index: self.move_to_end(name) @property def colnames(self): return list(self.keys()) def itercols(self): return self.values() def _construct_mixin_from_columns(new_name, obj_attrs, out): data_attrs_map = {} for name, val in obj_attrs.items(): if isinstance(val, SerializedColumn): # A SerializedColumn can just link to a serialized column using a name # (e.g., time.jd1), or itself be a mixin (e.g., coord.obstime). Note # that in principle a mixin could have include a column called 'name', # hence we check whether the value is actually a string (see gh-13232). 
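# A plain link looks like SerializedColumn({'name': 'time.jd1'}); a nested
# mixin instead maps attribute names to further SerializedColumns and is
# handled by the recursive call below.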
if 'name' in val and isinstance(val['name'], str): data_attrs_map[val['name']] = name else: out_name = f'{new_name}.{name}' _construct_mixin_from_columns(out_name, val, out) data_attrs_map[out_name] = name for name in data_attrs_map.values(): del obj_attrs[name] # The order of data_attrs_map may not match the actual order, as it is set # by the yaml description. So, sort names by position in the serialized table. # Keep the index of the first column, so we can insert the new one there later. names = sorted(data_attrs_map, key=out.colnames.index) idx = out.colnames.index(names[0]) # Name is the column name in the table (e.g. "coord.ra") and # data_attr is the object attribute name (e.g. "ra"). A different # example would be a formatted time object that would have (e.g.) # "time_col" and "value", respectively. for name in names: obj_attrs[data_attrs_map[name]] = out[name] del out[name] info = obj_attrs.pop('__info__', {}) if len(names) == 1: # col is the first and only serialized column; in that case, use info # stored on the column. First step is to get that first column which # has been moved from `out` to `obj_attrs` above. col = obj_attrs[data_attrs_map[name]] # Now copy the relevant attributes for attr, nontrivial in (('unit', lambda x: x not in (None, '')), ('format', lambda x: x is not None), ('description', lambda x: x is not None), ('meta', lambda x: x)): col_attr = getattr(col.info, attr) if nontrivial(col_attr): info[attr] = col_attr info['name'] = new_name col = _construct_mixin_from_obj_attrs_and_info(obj_attrs, info) out.add_column(col, index=idx) def _construct_mixins_from_columns(tbl): if '__serialized_columns__' not in tbl.meta: return tbl meta = tbl.meta.copy() mixin_cols = meta.pop('__serialized_columns__') out = _TableLite(tbl.columns) for new_name, obj_attrs in mixin_cols.items(): _construct_mixin_from_columns(new_name, obj_attrs, out) # If no quantity subclasses are in the output then output as Table. # For instance ascii.read(file, format='ecsv') doesn't specify an # output class and should return the minimal table class that # represents the table file. has_quantities = any(isinstance(col.info, QuantityInfo) for col in out.itercols()) out_cls = QTable if has_quantities else Table return out_cls(list(out.values()), names=out.colnames, copy=False, meta=meta)
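# Round-trip sketch (illustrative, not part of the module): a Time mixin
# column is flattened to plain columns and then rebuilt from the
# '__serialized_columns__' metadata.
#
#     from astropy.table import QTable
#     from astropy.time import Time
#
#     t = QTable({'t': Time([2450000.5, 2450001.5], format='jd')})
#     plain = represent_mixins_as_columns(t)      # plain columns 't.jd1', 't.jd2'
#     rt = _construct_mixins_from_columns(plain)  # 't' is a Time column again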
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ The SCEngine class uses the ``sortedcontainers`` package to implement an Index engine for Tables. """ from collections import OrderedDict from itertools import starmap from astropy.utils.compat.optional_deps import HAS_SORTEDCONTAINERS if HAS_SORTEDCONTAINERS: from sortedcontainers import SortedList class Node(object): __slots__ = ('key', 'value') def __init__(self, key, value): self.key = key self.value = value def __lt__(self, other): if other.__class__ is Node: return (self.key, self.value) < (other.key, other.value) return self.key < other def __le__(self, other): if other.__class__ is Node: return (self.key, self.value) <= (other.key, other.value) return self.key <= other def __eq__(self, other): if other.__class__ is Node: return (self.key, self.value) == (other.key, other.value) return self.key == other def __ne__(self, other): if other.__class__ is Node: return (self.key, self.value) != (other.key, other.value) return self.key != other def __gt__(self, other): if other.__class__ is Node: return (self.key, self.value) > (other.key, other.value) return self.key > other def __ge__(self, other): if other.__class__ is Node: return (self.key, self.value) >= (other.key, other.value) return self.key >= other __hash__ = None def __repr__(self): return f'Node({self.key!r}, {self.value!r})' class SCEngine: ''' Fast tree-based implementation for indexing, using the ``sortedcontainers`` package. Parameters ---------- data : Table Sorted columns of the original table row_index : Column object Row numbers corresponding to data columns unique : bool Whether the values of the index must be unique. Defaults to False. ''' def __init__(self, data, row_index, unique=False): if not HAS_SORTEDCONTAINERS: raise ImportError("sortedcontainers is needed for using SCEngine") node_keys = map(tuple, data) self._nodes = SortedList(starmap(Node, zip(node_keys, row_index))) self._unique = unique def add(self, key, value): ''' Add a key, value pair. ''' if self._unique and (key in self._nodes): message = f'duplicate {key!r} in unique index' raise ValueError(message) self._nodes.add(Node(key, value)) def find(self, key): ''' Find rows corresponding to the given key. ''' return [node.value for node in self._nodes.irange(key, key)] def remove(self, key, data=None): ''' Remove data from the given key. ''' if data is not None: item = Node(key, data) try: self._nodes.remove(item) except ValueError: return False return True items = list(self._nodes.irange(key, key)) for item in items: self._nodes.remove(item) return bool(items) def shift_left(self, row): ''' Decrement rows larger than the given row. ''' for node in self._nodes: if node.value > row: node.value -= 1 def shift_right(self, row): ''' Increment rows greater than or equal to the given row. ''' for node in self._nodes: if node.value >= row: node.value += 1 def items(self): ''' Return a list of key, data tuples. ''' result = OrderedDict() for node in self._nodes: if node.key in result: result[node.key].append(node.value) else: result[node.key] = [node.value] return result.items() def sort(self): ''' Make row order align with key order. ''' for index, node in enumerate(self._nodes): node.value = index def sorted_data(self): ''' Return a list of rows in order sorted by key. ''' return [node.value for node in self._nodes] def range(self, lower, upper, bounds=(True, True)): ''' Return row values in the given range. 
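``bounds`` is a 2-tuple of bools specifying whether the lower and upper
bounds are inclusive; it is passed through to ``SortedList.irange``.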
''' iterator = self._nodes.irange(lower, upper, bounds) return [node.value for node in iterator] def replace_rows(self, row_map): ''' Replace rows with the values in row_map. ''' nodes = [node for node in self._nodes if node.value in row_map] for node in nodes: node.value = row_map[node.value] self._nodes.clear() self._nodes.update(nodes) def __repr__(self): if len(self._nodes) > 6: nodes = list(self._nodes[:3]) + ['...'] + list(self._nodes[-3:]) else: nodes = self._nodes nodes_str = ', '.join(str(node) for node in nodes) return f'<{self.__class__.__name__} nodes={nodes_str}>'
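
# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): SCEngine is normally selected
# through the Table indexing machinery rather than instantiated by hand.
# Requires the optional 'sortedcontainers' dependency.

if __name__ == '__main__':
    from astropy.table import Table

    t = Table({'a': [2, 1, 2], 'b': ['x', 'y', 'z']})
    t.add_index('a', engine=SCEngine)

    # Row lookup by indexed key value goes through the engine's
    # find()/range() methods defined above:
    print(t.loc[2])      # the two rows with a == 2
    print(t.loc[1:2])    # rows with a in the closed interval [1, 2]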
2a69a60db69a5ebb3fab54064dd8ddd703791200809ee07862f1327ed036b28f
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module implements classes (called Fitters) which combine optimization algorithms (typically from `scipy.optimize`) with statistic functions to perform fitting. Fitters are implemented as callable classes. In addition to the data to fit, the ``__call__`` method takes an instance of `~astropy.modeling.core.FittableModel` as input, and returns a copy of the model with its parameters determined by the optimizer. Optimization algorithms, called "optimizers" are implemented in `~astropy.modeling.optimizers` and statistic functions are in `~astropy.modeling.statistic`. The goal is to provide an easy to extend framework and allow users to easily create new fitters by combining statistics with optimizers. There are two exceptions to the above scheme. `~astropy.modeling.fitting.LinearLSQFitter` uses Numpy's `~numpy.linalg.lstsq` function. `~astropy.modeling.fitting.LevMarLSQFitter` uses `~scipy.optimize.leastsq` which combines optimization and statistic in one implementation. """ # pylint: disable=invalid-name import abc import inspect import operator import warnings from functools import reduce, wraps from importlib.metadata import entry_points import numpy as np from astropy.units import Quantity from astropy.utils.decorators import deprecated from astropy.utils.exceptions import AstropyUserWarning from .optimizers import DEFAULT_ACC, DEFAULT_EPS, DEFAULT_MAXITER, SLSQP, Simplex from .spline import ( # noqa: F401 SplineExactKnotsFitter, SplineInterpolateFitter, SplineSmoothingFitter, SplineSplrepFitter) from .statistic import leastsquare from .utils import _combine_equivalency_dict, poly_map_domain __all__ = ['LinearLSQFitter', 'LevMarLSQFitter', 'TRFLSQFitter', 'DogBoxLSQFitter', 'LMLSQFitter', 'FittingWithOutlierRemoval', 'SLSQPLSQFitter', 'SimplexLSQFitter', 'JointFitter', 'Fitter', 'ModelLinearityError', "ModelsError"] # Statistic functions implemented in `astropy.modeling.statistic.py STATISTICS = [leastsquare] # Optimizers implemented in `astropy.modeling.optimizers.py OPTIMIZERS = [Simplex, SLSQP] class NonFiniteValueError(RuntimeError): """ Error raised when attempting to a non-finite value """ class Covariance(): """Class for covariance matrix calculated by fitter. """ def __init__(self, cov_matrix, param_names): self.cov_matrix = cov_matrix self.param_names = param_names def pprint(self, max_lines, round_val): # Print and label lower triangle of covariance matrix # Print rows for params up to `max_lines`, round floats to 'round_val' longest_name = max([len(x) for x in self.param_names]) ret_str = 'parameter variances / covariances \n' fstring = f'{"": <{longest_name}}| {{0}}\n' for i, row in enumerate(self.cov_matrix): if i <= max_lines-1: param = self.param_names[i] ret_str += (fstring.replace(' '*len(param), param, 1) .format(repr(np.round(row[:i+1], round_val))[7:-2])) else: ret_str += '...' 
return(ret_str.rstrip()) def __repr__(self): return(self.pprint(max_lines=10, round_val=3)) def __getitem__(self, params): # index covariance matrix by parameter names or indices if len(params) != 2: raise ValueError('Covariance must be indexed by two values.') if all(isinstance(item, str) for item in params): i1, i2 = self.param_names.index(params[0]), self.param_names.index(params[1]) elif all(isinstance(item, int) for item in params): i1, i2 = params else: raise TypeError('Covariance can be indexed by two parameter names or integer indices.') return(self.cov_matrix[i1][i2]) class StandardDeviations(): """ Class for fitting uncertainties.""" def __init__(self, cov_matrix, param_names): self.param_names = param_names self.stds = self._calc_stds(cov_matrix) def _calc_stds(self, cov_matrix): # sometimes scipy lstsq returns a non-sensical negative vals in the # diagonals of the cov_x it computes. stds = [np.sqrt(x) if x > 0 else None for x in np.diag(cov_matrix)] return stds def pprint(self, max_lines, round_val): longest_name = max([len(x) for x in self.param_names]) ret_str = 'standard deviations\n' for i, std in enumerate(self.stds): if i <= max_lines-1: param = self.param_names[i] ret_str += (f"{param}{' ' * (longest_name - len(param))}| " f"{np.round(std, round_val)}\n") else: ret_str += '...' return(ret_str.rstrip()) def __repr__(self): return(self.pprint(max_lines=10, round_val=3)) def __getitem__(self, param): if isinstance(param, str): i = self.param_names.index(param) elif isinstance(param, int): i = param else: raise TypeError('Standard deviation can be indexed by parameter name or integer.') return(self.stds[i]) class ModelsError(Exception): """Base class for model exceptions""" class ModelLinearityError(ModelsError): """ Raised when a non-linear model is passed to a linear fitter.""" class UnsupportedConstraintError(ModelsError, ValueError): """ Raised when a fitter does not support a type of constraint. """ class _FitterMeta(abc.ABCMeta): """ Currently just provides a registry for all Fitter classes. """ registry = set() def __new__(mcls, name, bases, members): cls = super().__new__(mcls, name, bases, members) if not inspect.isabstract(cls) and not name.startswith('_'): mcls.registry.add(cls) return cls def fitter_unit_support(func): """ This is a decorator that can be used to add support for dealing with quantities to any __call__ method on a fitter which may not support quantities itself. This is done by temporarily removing units from all parameters then adding them back once the fitting has completed. """ @wraps(func) def wrapper(self, model, x, y, z=None, **kwargs): equivalencies = kwargs.pop('equivalencies', None) data_has_units = (isinstance(x, Quantity) or isinstance(y, Quantity) or isinstance(z, Quantity)) model_has_units = model._has_units if data_has_units or model_has_units: if model._supports_unit_fitting: # We now combine any instance-level input equivalencies with user # specified ones at call-time. input_units_equivalencies = _combine_equivalency_dict( model.inputs, equivalencies, model.input_units_equivalencies) # If input_units is defined, we transform the input data into those # expected by the model. 
We hard-code the input names 'x', and 'y' # here since FittableModel instances have input names ('x',) or # ('x', 'y') if model.input_units is not None: if isinstance(x, Quantity): x = x.to(model.input_units[model.inputs[0]], equivalencies=input_units_equivalencies[model.inputs[0]]) if isinstance(y, Quantity) and z is not None: y = y.to(model.input_units[model.inputs[1]], equivalencies=input_units_equivalencies[model.inputs[1]]) # Create a dictionary mapping the real model inputs and outputs # names to the data. This remapping of names must be done here, after # the input data is converted to the correct units. rename_data = {model.inputs[0]: x} if z is not None: rename_data[model.outputs[0]] = z rename_data[model.inputs[1]] = y else: rename_data[model.outputs[0]] = y rename_data['z'] = None # We now strip away the units from the parameters, taking care to # first convert any parameters to the units that correspond to the # input units (to make sure that initial guesses on the parameters) # are in the right unit system model = model.without_units_for_data(**rename_data) if isinstance(model, tuple): rename_data['_left_kwargs'] = model[1] rename_data['_right_kwargs'] = model[2] model = model[0] # We strip away the units from the input itself add_back_units = False if isinstance(x, Quantity): add_back_units = True xdata = x.value else: xdata = np.asarray(x) if isinstance(y, Quantity): add_back_units = True ydata = y.value else: ydata = np.asarray(y) if z is not None: if isinstance(z, Quantity): add_back_units = True zdata = z.value else: zdata = np.asarray(z) # We run the fitting if z is None: model_new = func(self, model, xdata, ydata, **kwargs) else: model_new = func(self, model, xdata, ydata, zdata, **kwargs) # And finally we add back units to the parameters if add_back_units: model_new = model_new.with_units_from_data(**rename_data) return model_new else: raise NotImplementedError("This model does not support being " "fit to data with units.") else: return func(self, model, x, y, z=z, **kwargs) return wrapper class Fitter(metaclass=_FitterMeta): """ Base class for all fitters. Parameters ---------- optimizer : callable A callable implementing an optimization algorithm statistic : callable Statistic function """ supported_constraints = [] def __init__(self, optimizer, statistic): if optimizer is None: raise ValueError("Expected an optimizer.") if statistic is None: raise ValueError("Expected a statistic function.") if inspect.isclass(optimizer): # a callable class self._opt_method = optimizer() elif inspect.isfunction(optimizer): self._opt_method = optimizer else: raise ValueError("Expected optimizer to be a callable class or a function.") if inspect.isclass(statistic): self._stat_method = statistic() else: self._stat_method = statistic def objective_function(self, fps, *args): """ Function to minimize. Parameters ---------- fps : list parameters returned by the fitter args : list [model, [other_args], [input coordinates]] other_args may include weights or any other quantities specific for a statistic Notes ----- The list of arguments (args) is set in the `__call__` method. Fitters may overwrite this method, e.g. when statistic functions require other arguments. """ model = args[0] meas = args[-1] fitter_to_model_params(model, fps) res = self._stat_method(meas, model, *args[1:-1]) return res @staticmethod def _add_fitting_uncertainties(*args): """ When available, calculate and sets the parameter covariance matrix (model.cov_matrix) and standard deviations (model.stds). 
""" return None @abc.abstractmethod def __call__(self): """ This method performs the actual fitting and modifies the parameter list of a model. Fitter subclasses should implement this method. """ raise NotImplementedError("Subclasses should implement this method.") # TODO: I have ongoing branch elsewhere that's refactoring this module so that # all the fitter classes in here are Fitter subclasses. In the meantime we # need to specify that _FitterMeta is its metaclass. class LinearLSQFitter(metaclass=_FitterMeta): """ A class performing a linear least square fitting. Uses `numpy.linalg.lstsq` to do the fitting. Given a model and data, fits the model to the data and changes the model's parameters. Keeps a dictionary of auxiliary fitting information. Notes ----- Note that currently LinearLSQFitter does not support compound models. """ supported_constraints = ['fixed'] supports_masked_input = True def __init__(self, calc_uncertainties=False): self.fit_info = {'residuals': None, 'rank': None, 'singular_values': None, 'params': None } self._calc_uncertainties = calc_uncertainties @staticmethod def _is_invertible(m): """Check if inverse of matrix can be obtained.""" if m.shape[0] != m.shape[1]: return False if np.linalg.matrix_rank(m) < m.shape[0]: return False return True def _add_fitting_uncertainties(self, model, a, n_coeff, x, y, z=None, resids=None): """ Calculate and parameter covariance matrix and standard deviations and set `cov_matrix` and `stds` attributes. """ x_dot_x_prime = np.dot(a.T, a) masked = False or hasattr(y, 'mask') # check if invertible. if not, can't calc covariance. if not self._is_invertible(x_dot_x_prime): return(model) inv_x_dot_x_prime = np.linalg.inv(x_dot_x_prime) if z is None: # 1D models if len(model) == 1: # single model mask = None if masked: mask = y.mask xx = np.ma.array(x, mask=mask) RSS = [(1/(xx.count()-n_coeff)) * resids] if len(model) > 1: # model sets RSS = [] # collect sum residuals squared for each model in set for j in range(len(model)): mask = None if masked: mask = y.mask[..., j].flatten() xx = np.ma.array(x, mask=mask) eval_y = model(xx, model_set_axis=False) eval_y = np.rollaxis(eval_y, model.model_set_axis)[j] RSS.append((1/(xx.count()-n_coeff)) * np.sum((y[..., j] - eval_y)**2)) else: # 2D model if len(model) == 1: mask = None if masked: warnings.warn('Calculation of fitting uncertainties ' 'for 2D models with masked values not ' 'currently supported.\n', AstropyUserWarning) return xx, _ = np.ma.array(x, mask=mask), np.ma.array(y, mask=mask) # len(xx) instead of xx.count. this will break if values are masked? RSS = [(1/(len(xx)-n_coeff)) * resids] else: RSS = [] for j in range(len(model)): eval_z = model(x, y, model_set_axis=False) mask = None # need to figure out how to deal w/ masking here. if model.model_set_axis == 1: # model_set_axis passed when evaluating only refers to input shapes # so output must be reshaped for model_set_axis=1. 
eval_z = np.rollaxis(eval_z, 1) eval_z = eval_z[j] RSS.append([(1/(len(x)-n_coeff)) * np.sum((z[j] - eval_z)**2)]) covs = [inv_x_dot_x_prime * r for r in RSS] free_param_names = [x for x in model.fixed if (model.fixed[x] is False) and (model.tied[x] is False)] if len(covs) == 1: model.cov_matrix = Covariance(covs[0], model.param_names) model.stds = StandardDeviations(covs[0], free_param_names) else: model.cov_matrix = [Covariance(cov, model.param_names) for cov in covs] model.stds = [StandardDeviations(cov, free_param_names) for cov in covs] @staticmethod def _deriv_with_constraints(model, param_indices, x=None, y=None): if y is None: d = np.array(model.fit_deriv(x, *model.parameters)) else: d = np.array(model.fit_deriv(x, y, *model.parameters)) if model.col_fit_deriv: return d[param_indices] else: return d[..., param_indices] def _map_domain_window(self, model, x, y=None): """ Maps domain into window for a polynomial model which has these attributes. """ if y is None: if hasattr(model, 'domain') and model.domain is None: model.domain = [x.min(), x.max()] if hasattr(model, 'window') and model.window is None: model.window = [-1, 1] return poly_map_domain(x, model.domain, model.window) else: if hasattr(model, 'x_domain') and model.x_domain is None: model.x_domain = [x.min(), x.max()] if hasattr(model, 'y_domain') and model.y_domain is None: model.y_domain = [y.min(), y.max()] if hasattr(model, 'x_window') and model.x_window is None: model.x_window = [-1., 1.] if hasattr(model, 'y_window') and model.y_window is None: model.y_window = [-1., 1.] xnew = poly_map_domain(x, model.x_domain, model.x_window) ynew = poly_map_domain(y, model.y_domain, model.y_window) return xnew, ynew @fitter_unit_support def __call__(self, model, x, y, z=None, weights=None, rcond=None): """ Fit data to this model. Parameters ---------- model : `~astropy.modeling.FittableModel` model to fit to x, y, z x : array Input coordinates y : array-like Input coordinates z : array-like, optional Input coordinates. If the dependent (``y`` or ``z``) coordinate values are provided as a `numpy.ma.MaskedArray`, any masked points are ignored when fitting. Note that model set fitting is significantly slower when there are masked points (not just an empty mask), as the matrix equation has to be solved for each model separately when their coordinate grids differ. weights : array, optional Weights for fitting. For data with Gaussian uncertainties, the weights should be 1/sigma. rcond : float, optional Cut-off ratio for small singular values of ``a``. Singular values are set to zero if they are smaller than ``rcond`` times the largest singular value of ``a``. equivalencies : list or None, optional, keyword-only List of *additional* equivalencies that are should be applied in case x, y and/or z have units. Default is None. 
Returns ------- model_copy : `~astropy.modeling.FittableModel` a copy of the input model with parameters set by the fitter """ if not model.fittable: raise ValueError("Model must be a subclass of FittableModel") if not model.linear: raise ModelLinearityError('Model is not linear in parameters, ' 'linear fit methods should not be used.') if hasattr(model, "submodel_names"): raise ValueError("Model must be simple, not compound") _validate_constraints(self.supported_constraints, model) model_copy = model.copy() model_copy.sync_constraints = False _, fitparam_indices, _ = model_to_fit_params(model_copy) if model_copy.n_inputs == 2 and z is None: raise ValueError("Expected x, y and z for a 2 dimensional model.") farg = _convert_input(x, y, z, n_models=len(model_copy), model_set_axis=model_copy.model_set_axis) has_fixed = any(model_copy.fixed.values()) # This is also done by _convert_inputs, but we need it here to allow # checking the array dimensionality before that gets called: if weights is not None: weights = np.asarray(weights, dtype=float) if has_fixed: # The list of fixed params is the complement of those being fitted: fixparam_indices = [idx for idx in range(len(model_copy.param_names)) if idx not in fitparam_indices] # Construct matrix of user-fixed parameters that can be dotted with # the corresponding fit_deriv() terms, to evaluate corrections to # the dependent variable in order to fit only the remaining terms: fixparams = np.asarray([getattr(model_copy, model_copy.param_names[idx]).value for idx in fixparam_indices]) if len(farg) == 2: x, y = farg if weights is not None: # If we have separate weights for each model, apply the same # conversion as for the data, otherwise check common weights # as if for a single model: _, weights = _convert_input( x, weights, n_models=len(model_copy) if weights.ndim == y.ndim else 1, model_set_axis=model_copy.model_set_axis ) # map domain into window if hasattr(model_copy, 'domain'): x = self._map_domain_window(model_copy, x) if has_fixed: lhs = np.asarray(self._deriv_with_constraints(model_copy, fitparam_indices, x=x)) fixderivs = self._deriv_with_constraints(model_copy, fixparam_indices, x=x) else: lhs = np.asarray(model_copy.fit_deriv(x, *model_copy.parameters)) sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x) rhs = y else: x, y, z = farg if weights is not None: # If we have separate weights for each model, apply the same # conversion as for the data, otherwise check common weights # as if for a single model: _, _, weights = _convert_input( x, y, weights, n_models=len(model_copy) if weights.ndim == z.ndim else 1, model_set_axis=model_copy.model_set_axis ) # map domain into window if hasattr(model_copy, 'x_domain'): x, y = self._map_domain_window(model_copy, x, y) if has_fixed: lhs = np.asarray(self._deriv_with_constraints(model_copy, fitparam_indices, x=x, y=y)) fixderivs = self._deriv_with_constraints(model_copy, fixparam_indices, x=x, y=y) else: lhs = np.asanyarray(model_copy.fit_deriv(x, y, *model_copy.parameters)) sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x, y) if len(model_copy) > 1: # Just to be explicit (rather than baking in False == 0): model_axis = model_copy.model_set_axis or 0 if z.ndim > 2: # For higher-dimensional z, flatten all the axes except the # dimension along which models are stacked and transpose so # the model axis is *last* (I think this resolves Erik's # pending generalization from 80a6f25a): rhs = np.rollaxis(z, model_axis, z.ndim) rhs = rhs.reshape(-1, rhs.shape[-1]) else: # This "else" seems to 
handle the corner case where the # user has already flattened x/y before attempting a 2D fit # but z has a second axis for the model set. NB. This is # ~5-10x faster than using rollaxis. rhs = z.T if model_axis == 0 else z if weights is not None: # Same for weights if weights.ndim > 2: # Separate 2D weights for each model: weights = np.rollaxis(weights, model_axis, weights.ndim) weights = weights.reshape(-1, weights.shape[-1]) elif weights.ndim == z.ndim: # Separate, flattened weights for each model: weights = weights.T if model_axis == 0 else weights else: # Common weights for all the models: weights = weights.flatten() else: rhs = z.flatten() if weights is not None: weights = weights.flatten() # If the derivative is defined along rows (as with non-linear models) if model_copy.col_fit_deriv: lhs = np.asarray(lhs).T # Some models (eg. Polynomial1D) don't flatten multi-dimensional inputs # when constructing their Vandermonde matrix, which can lead to obscure # failures below. Ultimately, np.linalg.lstsq can't handle >2D matrices, # so just raise a slightly more informative error when this happens: if np.asanyarray(lhs).ndim > 2: raise ValueError(f"{type(model_copy).__name__} gives unsupported >2D " "derivative matrix for this x/y") # Subtract any terms fixed by the user from (a copy of) the RHS, in # order to fit the remaining terms correctly: if has_fixed: if model_copy.col_fit_deriv: fixderivs = np.asarray(fixderivs).T # as for lhs above rhs = rhs - fixderivs.dot(fixparams) # evaluate user-fixed terms # Subtract any terms implicit in the model from the RHS, which, like # user-fixed terms, affect the dependent variable but are not fitted: if sum_of_implicit_terms is not None: # If we have a model set, the extra axis must be added to # sum_of_implicit_terms as its innermost dimension, to match the # dimensionality of rhs after _convert_input "rolls" it as needed # by np.linalg.lstsq. The vector then gets broadcast to the right # number of sets (columns). This assumes all the models share the # same input coordinates, as is currently the case. if len(model_copy) > 1: sum_of_implicit_terms = sum_of_implicit_terms[..., np.newaxis] rhs = rhs - sum_of_implicit_terms if weights is not None: if rhs.ndim == 2: if weights.shape == rhs.shape: # separate weights for multiple models case: broadcast # lhs to have more dimension (for each model) lhs = lhs[..., np.newaxis] * weights[:, np.newaxis] rhs = rhs * weights else: lhs *= weights[:, np.newaxis] # Don't modify in-place in case rhs was the original # dependent variable array rhs = rhs * weights[:, np.newaxis] else: lhs *= weights[:, np.newaxis] rhs = rhs * weights scl = (lhs * lhs).sum(0) lhs /= scl masked = np.any(np.ma.getmask(rhs)) if weights is not None and not masked and np.any(np.isnan(lhs)): raise ValueError('Found NaNs in the coefficient matrix, which ' 'should not happen and would crash the lapack ' 'routine. Maybe check that weights are not null.') a = None # need for calculating covarience if ((masked and len(model_copy) > 1) or (weights is not None and weights.ndim > 1)): # Separate masks or weights for multiple models case: Numpy's # lstsq supports multiple dimensions only for rhs, so we need to # loop manually on the models. This may be fixed in the future # with https://github.com/numpy/numpy/pull/15777. # Initialize empty array of coefficients and populate it one model # at a time. 
The shape matches the number of coefficients from the # Vandermonde matrix and the number of models from the RHS: lacoef = np.zeros(lhs.shape[1:2] + rhs.shape[-1:], dtype=rhs.dtype) # Arrange the lhs as a stack of 2D matrices that we can iterate # over to get the correctly-orientated lhs for each model: if lhs.ndim > 2: lhs_stack = np.rollaxis(lhs, -1, 0) else: lhs_stack = np.broadcast_to(lhs, rhs.shape[-1:] + lhs.shape) # Loop over the models and solve for each one. By this point, the # model set axis is the second of two. Transpose rather than using, # say, np.moveaxis(array, -1, 0), since it's slightly faster and # lstsq can't handle >2D arrays anyway. This could perhaps be # optimized by collecting together models with identical masks # (eg. those with no rejected points) into one operation, though it # will still be relatively slow when calling lstsq repeatedly. for model_lhs, model_rhs, model_lacoef in zip(lhs_stack, rhs.T, lacoef.T): # Cull masked points on both sides of the matrix equation: good = ~model_rhs.mask if masked else slice(None) model_lhs = model_lhs[good] model_rhs = model_rhs[good][..., np.newaxis] a = model_lhs # Solve for this model: t_coef, resids, rank, sval = np.linalg.lstsq(model_lhs, model_rhs, rcond) model_lacoef[:] = t_coef.T else: # If we're fitting one or more models over a common set of points, # we only have to solve a single matrix equation, which is an order # of magnitude faster than calling lstsq() once per model below: good = ~rhs.mask if masked else slice(None) # latter is a no-op a = lhs[good] # Solve for one or more models: lacoef, resids, rank, sval = np.linalg.lstsq(lhs[good], rhs[good], rcond) self.fit_info['residuals'] = resids self.fit_info['rank'] = rank self.fit_info['singular_values'] = sval lacoef /= scl[:, np.newaxis] if scl.ndim < rhs.ndim else scl self.fit_info['params'] = lacoef fitter_to_model_params(model_copy, lacoef.flatten()) # TODO: Only Polynomial models currently have an _order attribute; # maybe change this to read isinstance(model, PolynomialBase) if (hasattr(model_copy, '_order') and len(model_copy) == 1 and not has_fixed and rank != model_copy._order): warnings.warn("The fit may be poorly conditioned\n", AstropyUserWarning) # calculate and set covariance matrix and standard devs. on model if self._calc_uncertainties: if len(y) > len(lacoef): self._add_fitting_uncertainties(model_copy, a*scl, len(lacoef), x, y, z, resids) model_copy.sync_constraints = True return model_copy class FittingWithOutlierRemoval: """ This class combines an outlier removal technique with a fitting procedure. Basically, given a maximum number of iterations ``niter``, outliers are removed and fitting is performed for each iteration, until no new outliers are found or ``niter`` is reached. Parameters ---------- fitter : `Fitter` An instance of any Astropy fitter, i.e., LinearLSQFitter, LevMarLSQFitter, SLSQPLSQFitter, SimplexLSQFitter, JointFitter. For model set fitting, this must understand masked input data (as indicated by the fitter class attribute ``supports_masked_input``). outlier_func : callable A function for outlier removal. If this accepts an ``axis`` parameter like the `numpy` functions, the appropriate value will be supplied automatically when fitting model sets (unless overridden in ``outlier_kwargs``), to find outliers for each model separately; otherwise, the same filtering must be performed in a loop over models, which is almost an order of magnitude slower. niter : int, optional Maximum number of iterations. 
outlier_kwargs : dict, optional Keyword arguments for outlier_func. Attributes ---------- fit_info : dict The ``fit_info`` (if any) from the last iteration of the wrapped ``fitter`` during the most recent fit. An entry is also added with the keyword ``niter`` that records the actual number of fitting iterations performed (as opposed to the user-specified maximum). """ def __init__(self, fitter, outlier_func, niter=3, **outlier_kwargs): self.fitter = fitter self.outlier_func = outlier_func self.niter = niter self.outlier_kwargs = outlier_kwargs self.fit_info = {'niter': None} def __str__(self): return (f"Fitter: {self.fitter.__class__.__name__}\n" f"Outlier function: {self.outlier_func.__name__}\n" f"Num. of iterations: {self.niter}\n" f"Outlier func. args.: {self.outlier_kwargs}") def __repr__(self): return (f"{self.__class__.__name__}(fitter: {self.fitter.__class__.__name__}, " f"outlier_func: {self.outlier_func.__name__}," f" niter: {self.niter}, outlier_kwargs: {self.outlier_kwargs})") def __call__(self, model, x, y, z=None, weights=None, **kwargs): """ Parameters ---------- model : `~astropy.modeling.FittableModel` An analytic model which will be fit to the provided data. This also contains the initial guess for an optimization algorithm. x : array-like Input coordinates. y : array-like Data measurements (1D case) or input coordinates (2D case). z : array-like, optional Data measurements (2D case). weights : array-like, optional Weights to be passed to the fitter. kwargs : dict, optional Keyword arguments to be passed to the fitter. Returns ------- fitted_model : `~astropy.modeling.FittableModel` Fitted model after outlier removal. mask : `numpy.ndarray` Boolean mask array, identifying which points were used in the final fitting iteration (False) and which were found to be outliers or were masked in the input (True). """ # For single models, the data get filtered here at each iteration and # then passed to the fitter, which is the historical behavior and # works even for fitters that don't understand masked arrays. For model # sets, the fitter must be able to filter masked data internally, # because fitters require a single set of x/y coordinates whereas the # eliminated points can vary between models. To avoid this limitation, # we could fall back to looping over individual model fits, but it # would likely be fiddly and involve even more overhead (and the # non-linear fitters don't work with model sets anyway, as of writing). 
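        # (Illustrative aside, hedged: a typical construction of this class,
        #  using astropy.stats.sigma_clip as the outlier function, is
        #      fitter = FittingWithOutlierRemoval(LinearLSQFitter(),
        #                                         sigma_clip, niter=3,
        #                                         sigma=3.0)
        #      fitted_model, mask = fitter(model, x, y)
        #  Extra keywords such as `sigma` are forwarded to the outlier
        #  function via outlier_kwargs, and an `axis` keyword is injected
        #  automatically for model sets, as described above.)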
if len(model) == 1: model_set_axis = None else: if (not hasattr(self.fitter, 'supports_masked_input') or self.fitter.supports_masked_input is not True): raise ValueError(f"{type(self.fitter).__name__} cannot fit model sets with masked " "values") # Fitters use their input model's model_set_axis to determine how # their input data are stacked: model_set_axis = model.model_set_axis # Construct input coordinate tuples for fitters & models that are # appropriate for the dimensionality being fitted: if z is None: coords = (x, ) data = y else: coords = x, y data = z # For model sets, construct a numpy-standard "axis" tuple for the # outlier function, to treat each model separately (if supported): if model_set_axis is not None: if model_set_axis < 0: model_set_axis += data.ndim if 'axis' not in self.outlier_kwargs: # allow user override # This also works for False (like model instantiation): self.outlier_kwargs['axis'] = tuple( n for n in range(data.ndim) if n != model_set_axis ) loop = False # Starting fit, prior to any iteration and masking: fitted_model = self.fitter(model, x, y, z, weights=weights, **kwargs) filtered_data = np.ma.masked_array(data) if filtered_data.mask is np.ma.nomask: filtered_data.mask = False filtered_weights = weights last_n_masked = filtered_data.mask.sum() n = 0 # (allow recording no. of iterations when 0) # Perform the iterative fitting: for n in range(1, self.niter + 1): # (Re-)evaluate the last model: model_vals = fitted_model(*coords, model_set_axis=False) # Determine the outliers: if not loop: # Pass axis parameter if outlier_func accepts it, otherwise # prepare for looping over models: try: filtered_data = self.outlier_func( filtered_data - model_vals, **self.outlier_kwargs ) # If this happens to catch an error with a parameter other # than axis, the next attempt will fail accordingly: except TypeError: if model_set_axis is None: raise else: self.outlier_kwargs.pop('axis', None) loop = True # Construct MaskedArray to hold filtered values: filtered_data = np.ma.masked_array( filtered_data, dtype=np.result_type(filtered_data, model_vals), copy=True ) # Make sure the mask is an array, not just nomask: if filtered_data.mask is np.ma.nomask: filtered_data.mask = False # Get views transposed appropriately for iteration # over the set (handling data & mask separately due to # NumPy issue #8506): data_T = np.rollaxis(filtered_data, model_set_axis, 0) mask_T = np.rollaxis(filtered_data.mask, model_set_axis, 0) if loop: model_vals_T = np.rollaxis(model_vals, model_set_axis, 0) for row_data, row_mask, row_mod_vals in zip(data_T, mask_T, model_vals_T): masked_residuals = self.outlier_func( row_data - row_mod_vals, **self.outlier_kwargs ) row_data.data[:] = masked_residuals.data row_mask[:] = masked_residuals.mask # Issue speed warning after the fact, so it only shows up when # the TypeError is genuinely due to the axis argument. 
warnings.warn('outlier_func did not accept axis argument; ' 'reverted to slow loop over models.', AstropyUserWarning) # Recombine newly-masked residuals with model to get masked values: filtered_data += model_vals # Re-fit the data after filtering, passing masked/unmasked values # for single models / sets, respectively: if model_set_axis is None: good = ~filtered_data.mask if weights is not None: filtered_weights = weights[good] fitted_model = self.fitter(fitted_model, *(c[good] for c in coords), filtered_data.data[good], weights=filtered_weights, **kwargs) else: fitted_model = self.fitter(fitted_model, *coords, filtered_data, weights=filtered_weights, **kwargs) # Stop iteration if the masked points are no longer changing (with # cumulative rejection we only need to compare how many there are): this_n_masked = filtered_data.mask.sum() # (minimal overhead) if this_n_masked == last_n_masked: break last_n_masked = this_n_masked self.fit_info = {'niter': n} self.fit_info.update(getattr(self.fitter, 'fit_info', {})) return fitted_model, filtered_data.mask class _NonLinearLSQFitter(metaclass=_FitterMeta): """ Base class for Non-Linear least-squares fitters Parameters ---------- calc_uncertainties : bool If the covarience matrix should be computed and set in the fit_info. Default: False use_min_max_bounds : bool If the set parameter bounds for a model will be enforced each given parameter while fitting via a simple min/max condition. Default: True """ supported_constraints = ['fixed', 'tied', 'bounds'] """ The constraint types supported by this fitter type. """ def __init__(self, calc_uncertainties=False, use_min_max_bounds=True): self.fit_info = None self._calc_uncertainties = calc_uncertainties self._use_min_max_bounds = use_min_max_bounds super().__init__() def objective_function(self, fps, *args): """ Function to minimize. Parameters ---------- fps : list parameters returned by the fitter args : list [model, [weights], [input coordinates]] """ model = args[0] weights = args[1] fitter_to_model_params(model, fps, self._use_min_max_bounds) meas = args[-1] if weights is None: value = np.ravel(model(*args[2: -1]) - meas) else: value = np.ravel(weights * (model(*args[2: -1]) - meas)) if not np.all(np.isfinite(value)): raise NonFiniteValueError("Objective function has encountered a non-finite value, " "this will cause the fit to fail!") return value @staticmethod def _add_fitting_uncertainties(model, cov_matrix): """ Set ``cov_matrix`` and ``stds`` attributes on model with parameter covariance matrix returned by ``optimize.leastsq``. """ free_param_names = [x for x in model.fixed if (model.fixed[x] is False) and (model.tied[x] is False)] model.cov_matrix = Covariance(cov_matrix, free_param_names) model.stds = StandardDeviations(cov_matrix, free_param_names) @staticmethod def _wrap_deriv(params, model, weights, x, y, z=None): """ Wraps the method calculating the Jacobian of the function to account for model constraints. `scipy.optimize.leastsq` expects the function derivative to have the above signature (parlist, (argtuple)). In order to accommodate model constraints, instead of using p directly, we set the parameter list in this function. 
""" if weights is None: weights = 1.0 if any(model.fixed.values()) or any(model.tied.values()): # update the parameters with the current values from the fitter fitter_to_model_params(model, params) if z is None: full = np.array(model.fit_deriv(x, *model.parameters)) if not model.col_fit_deriv: full_deriv = np.ravel(weights) * full.T else: full_deriv = np.ravel(weights) * full else: full = np.array([np.ravel(_) for _ in model.fit_deriv(x, y, *model.parameters)]) if not model.col_fit_deriv: full_deriv = np.ravel(weights) * full.T else: full_deriv = np.ravel(weights) * full pars = [getattr(model, name) for name in model.param_names] fixed = [par.fixed for par in pars] tied = [par.tied for par in pars] tied = list(np.where([par.tied is not False for par in pars], True, tied)) fix_and_tie = np.logical_or(fixed, tied) ind = np.logical_not(fix_and_tie) if not model.col_fit_deriv: residues = np.asarray(full_deriv[np.nonzero(ind)]).T else: residues = full_deriv[np.nonzero(ind)] return [np.ravel(_) for _ in residues] else: if z is None: try: return np.array([np.ravel(_) for _ in np.array(weights) * np.array(model.fit_deriv(x, *params))]) except ValueError: return np.array([np.ravel(_) for _ in np.array(weights) * np.moveaxis( np.array(model.fit_deriv(x, *params)), -1, 0)]).transpose() else: if not model.col_fit_deriv: return [np.ravel(_) for _ in (np.ravel(weights) * np.array(model.fit_deriv(x, y, *params)).T).T] return [np.ravel(_) for _ in weights * np.array(model.fit_deriv(x, y, *params))] def _compute_param_cov(self, model, y, init_values, cov_x, fitparams, farg): # now try to compute the true covariance matrix if (len(y) > len(init_values)) and cov_x is not None: sum_sqrs = np.sum(self.objective_function(fitparams, *farg)**2) dof = len(y) - len(init_values) self.fit_info['param_cov'] = cov_x * sum_sqrs / dof else: self.fit_info['param_cov'] = None if self._calc_uncertainties is True: if self.fit_info['param_cov'] is not None: self._add_fitting_uncertainties(model, self.fit_info['param_cov']) def _run_fitter(self, model, farg, maxiter, acc, epsilon, estimate_jacobian): return None, None, None @fitter_unit_support def __call__(self, model, x, y, z=None, weights=None, maxiter=DEFAULT_MAXITER, acc=DEFAULT_ACC, epsilon=DEFAULT_EPS, estimate_jacobian=False): """ Fit data to this model. Parameters ---------- model : `~astropy.modeling.FittableModel` model to fit to x, y, z x : array input coordinates y : array input coordinates z : array, optional input coordinates weights : array, optional Weights for fitting. For data with Gaussian uncertainties, the weights should be 1/sigma. maxiter : int maximum number of iterations acc : float Relative error desired in the approximate solution epsilon : float A suitable step length for the forward-difference approximation of the Jacobian (if model.fjac=None). If epsfcn is less than the machine precision, it is assumed that the relative errors in the functions are of the order of the machine precision. estimate_jacobian : bool If False (default) and if the model has a fit_deriv method, it will be used. Otherwise the Jacobian will be estimated. If True, the Jacobian will be estimated in any case. equivalencies : list or None, optional, keyword-only List of *additional* equivalencies that are should be applied in case x, y and/or z have units. Default is None. 
Returns ------- model_copy : `~astropy.modeling.FittableModel` a copy of the input model with parameters set by the fitter """ model_copy = _validate_model(model, self.supported_constraints) model_copy.sync_constraints = False farg = (model_copy, weights, ) + _convert_input(x, y, z) init_values, fitparams, cov_x = self._run_fitter(model_copy, farg, maxiter, acc, epsilon, estimate_jacobian) self._compute_param_cov(model_copy, y, init_values, cov_x, fitparams, farg) model.sync_constraints = True return model_copy class LevMarLSQFitter(_NonLinearLSQFitter): """ Levenberg-Marquardt algorithm and least squares statistic. Parameters ---------- calc_uncertainties : bool If the covarience matrix should be computed and set in the fit_info. Default: False Attributes ---------- fit_info : dict The `scipy.optimize.leastsq` result for the most recent fit (see notes). Notes ----- The ``fit_info`` dictionary contains the values returned by `scipy.optimize.leastsq` for the most recent fit, including the values from the ``infodict`` dictionary it returns. See the `scipy.optimize.leastsq` documentation for details on the meaning of these values. Note that the ``x`` return value is *not* included (as it is instead the parameter values of the returned model). Additionally, one additional element of ``fit_info`` is computed whenever a model is fit, with the key 'param_cov'. The corresponding value is the covariance matrix of the parameters as a 2D numpy array. The order of the matrix elements matches the order of the parameters in the fitted model (i.e., the same order as ``model.param_names``). """ def __init__(self, calc_uncertainties=False): super().__init__(calc_uncertainties) self.fit_info = {'nfev': None, 'fvec': None, 'fjac': None, 'ipvt': None, 'qtf': None, 'message': None, 'ierr': None, 'param_jac': None, 'param_cov': None} def _run_fitter(self, model, farg, maxiter, acc, epsilon, estimate_jacobian): from scipy import optimize if model.fit_deriv is None or estimate_jacobian: dfunc = None else: dfunc = self._wrap_deriv init_values, _, _ = model_to_fit_params(model) fitparams, cov_x, dinfo, mess, ierr = optimize.leastsq( self.objective_function, init_values, args=farg, Dfun=dfunc, col_deriv=model.col_fit_deriv, maxfev=maxiter, epsfcn=epsilon, xtol=acc, full_output=True) fitter_to_model_params(model, fitparams) self.fit_info.update(dinfo) self.fit_info['cov_x'] = cov_x self.fit_info['message'] = mess self.fit_info['ierr'] = ierr if ierr not in [1, 2, 3, 4]: warnings.warn("The fit may be unsuccessful; check " "fit_info['message'] for more information.", AstropyUserWarning) return init_values, fitparams, cov_x class _NLLSQFitter(_NonLinearLSQFitter): """ Wrapper class for `scipy.optimize.least_squares` method, which provides: - Trust Region Reflective - dogbox - Levenberg-Marqueardt algorithms using the least squares statistic. Parameters ---------- method : str ‘trf’ : Trust Region Reflective algorithm, particularly suitable for large sparse problems with bounds. Generally robust method. ‘dogbox’ : dogleg algorithm with rectangular trust regions, typical use case is small problems with bounds. Not recommended for problems with rank-deficient Jacobian. ‘lm’ : Levenberg-Marquardt algorithm as implemented in MINPACK. Doesn’t handle bounds and sparse Jacobians. Usually the most efficient method for small unconstrained problems. calc_uncertainties : bool If the covarience matrix should be computed and set in the fit_info. 
Default: False use_min_max_bounds: bool If the set parameter bounds for a model will be enforced each given parameter while fitting via a simple min/max condition. A True setting will replicate how LevMarLSQFitter enforces bounds. Default: False Attributes ---------- fit_info : A `scipy.optimize.OptimizeResult` class which contains all of the most recent fit information """ def __init__(self, method, calc_uncertainties=False, use_min_max_bounds=False): super().__init__(calc_uncertainties, use_min_max_bounds) self._method = method def _run_fitter(self, model, farg, maxiter, acc, epsilon, estimate_jacobian): from scipy import optimize from scipy.linalg import svd if model.fit_deriv is None or estimate_jacobian: dfunc = '2-point' else: def _dfunc(params, model, weights, x, y, z=None): if model.col_fit_deriv: return np.transpose(self._wrap_deriv(params, model, weights, x, y, z)) else: return self._wrap_deriv(params, model, weights, x, y, z) dfunc = _dfunc init_values, _, bounds = model_to_fit_params(model) # Note, if use_min_max_bounds is True we are defaulting to enforcing bounds # using the old method employed by LevMarLSQFitter, this is different # from the method that optimize.least_squares employs to enforce bounds # thus we override the bounds being passed to optimize.least_squares so # that it will not enforce any bounding. if self._use_min_max_bounds: bounds = (-np.inf, np.inf) self.fit_info = optimize.least_squares( self.objective_function, init_values, args=farg, jac=dfunc, max_nfev=maxiter, diff_step=np.sqrt(epsilon), xtol=acc, method=self._method, bounds=bounds ) # Adapted from ~scipy.optimize.minpack, see: # https://github.com/scipy/scipy/blob/47bb6febaa10658c72962b9615d5d5aa2513fa3a/scipy/optimize/minpack.py#L795-L816 # Do Moore-Penrose inverse discarding zero singular values. _, s, VT = svd(self.fit_info.jac, full_matrices=False) threshold = np.finfo(float).eps * max(self.fit_info.jac.shape) * s[0] s = s[s > threshold] VT = VT[:s.size] cov_x = np.dot(VT.T / s**2, VT) fitter_to_model_params(model, self.fit_info.x, False) if not self.fit_info.success: warnings.warn("The fit may be unsuccessful; check: \n" f" {self.fit_info.message}", AstropyUserWarning) return init_values, self.fit_info.x, cov_x class TRFLSQFitter(_NLLSQFitter): """ Trust Region Reflective algorithm and least squares statistic. Parameters ---------- calc_uncertainties : bool If the covarience matrix should be computed and set in the fit_info. Default: False use_min_max_bounds: bool If the set parameter bounds for a model will be enforced each given parameter while fitting via a simple min/max condition. A True setting will replicate how LevMarLSQFitter enforces bounds. Default: False Attributes ---------- fit_info : A `scipy.optimize.OptimizeResult` class which contains all of the most recent fit information """ def __init__(self, calc_uncertainties=False, use_min_max_bounds=False): super().__init__('trf', calc_uncertainties, use_min_max_bounds) class DogBoxLSQFitter(_NLLSQFitter): """ DogBox algorithm and least squares statistic. Parameters ---------- calc_uncertainties : bool If the covarience matrix should be computed and set in the fit_info. Default: False use_min_max_bounds: bool If the set parameter bounds for a model will be enforced each given parameter while fitting via a simple min/max condition. A True setting will replicate how LevMarLSQFitter enforces bounds. 
Default: False Attributes ---------- fit_info : A `scipy.optimize.OptimizeResult` class which contains all of the most recent fit information """ def __init__(self, calc_uncertainties=False, use_min_max_bounds=False): super().__init__('dogbox', calc_uncertainties, use_min_max_bounds) class LMLSQFitter(_NLLSQFitter): """ `scipy.optimize.least_squares` Levenberg-Marquardt algorithm and least squares statistic. Parameters ---------- calc_uncertainties : bool If the covarience matrix should be computed and set in the fit_info. Default: False Attributes ---------- fit_info : A `scipy.optimize.OptimizeResult` class which contains all of the most recent fit information """ def __init__(self, calc_uncertainties=False): super().__init__('lm', calc_uncertainties, True) class SLSQPLSQFitter(Fitter): """ Sequential Least Squares Programming (SLSQP) optimization algorithm and least squares statistic. Raises ------ ModelLinearityError A linear model is passed to a nonlinear fitter Notes ----- See also the `~astropy.modeling.optimizers.SLSQP` optimizer. """ supported_constraints = SLSQP.supported_constraints def __init__(self): super().__init__(optimizer=SLSQP, statistic=leastsquare) self.fit_info = {} @fitter_unit_support def __call__(self, model, x, y, z=None, weights=None, **kwargs): """ Fit data to this model. Parameters ---------- model : `~astropy.modeling.FittableModel` model to fit to x, y, z x : array input coordinates y : array input coordinates z : array, optional input coordinates weights : array, optional Weights for fitting. For data with Gaussian uncertainties, the weights should be 1/sigma. kwargs : dict optional keyword arguments to be passed to the optimizer or the statistic verblevel : int 0-silent 1-print summary upon completion, 2-print summary after each iteration maxiter : int maximum number of iterations epsilon : float the step size for finite-difference derivative estimates acc : float Requested accuracy equivalencies : list or None, optional, keyword-only List of *additional* equivalencies that are should be applied in case x, y and/or z have units. Default is None. Returns ------- model_copy : `~astropy.modeling.FittableModel` a copy of the input model with parameters set by the fitter """ model_copy = _validate_model(model, self._opt_method.supported_constraints) model_copy.sync_constraints = False farg = _convert_input(x, y, z) farg = (model_copy, weights, ) + farg init_values, _, _ = model_to_fit_params(model_copy) fitparams, self.fit_info = self._opt_method( self.objective_function, init_values, farg, **kwargs) fitter_to_model_params(model_copy, fitparams) model_copy.sync_constraints = True return model_copy class SimplexLSQFitter(Fitter): """ Simplex algorithm and least squares statistic. Raises ------ `ModelLinearityError` A linear model is passed to a nonlinear fitter """ supported_constraints = Simplex.supported_constraints def __init__(self): super().__init__(optimizer=Simplex, statistic=leastsquare) self.fit_info = {} @fitter_unit_support def __call__(self, model, x, y, z=None, weights=None, **kwargs): """ Fit data to this model. Parameters ---------- model : `~astropy.modeling.FittableModel` model to fit to x, y, z x : array input coordinates y : array input coordinates z : array, optional input coordinates weights : array, optional Weights for fitting. For data with Gaussian uncertainties, the weights should be 1/sigma. 
kwargs : dict optional keyword arguments to be passed to the optimizer or the statistic maxiter : int maximum number of iterations acc : float Relative error in approximate solution equivalencies : list or None, optional, keyword-only List of *additional* equivalencies that are should be applied in case x, y and/or z have units. Default is None. Returns ------- model_copy : `~astropy.modeling.FittableModel` a copy of the input model with parameters set by the fitter """ model_copy = _validate_model(model, self._opt_method.supported_constraints) model_copy.sync_constraints = False farg = _convert_input(x, y, z) farg = (model_copy, weights, ) + farg init_values, _, _ = model_to_fit_params(model_copy) fitparams, self.fit_info = self._opt_method( self.objective_function, init_values, farg, **kwargs) fitter_to_model_params(model_copy, fitparams) model_copy.sync_constraints = True return model_copy class JointFitter(metaclass=_FitterMeta): """ Fit models which share a parameter. For example, fit two gaussians to two data sets but keep the FWHM the same. Parameters ---------- models : list a list of model instances jointparameters : list a list of joint parameters initvals : list a list of initial values """ def __init__(self, models, jointparameters, initvals): self.models = list(models) self.initvals = list(initvals) self.jointparams = jointparameters self._verify_input() self.fitparams = self.model_to_fit_params() # a list of model.n_inputs self.modeldims = [m.n_inputs for m in self.models] # sum all model dimensions self.ndim = np.sum(self.modeldims) def model_to_fit_params(self): fparams = [] fparams.extend(self.initvals) for model in self.models: params = model.parameters.tolist() joint_params = self.jointparams[model] param_metrics = model._param_metrics for param_name in joint_params: slice_ = param_metrics[param_name]['slice'] del params[slice_] fparams.extend(params) return fparams def objective_function(self, fps, *args): """ Function to minimize. 
Parameters ---------- fps : list the fitted parameters - result of an one iteration of the fitting algorithm args : dict tuple of measured and input coordinates args is always passed as a tuple from optimize.leastsq """ lstsqargs = list(args) fitted = [] fitparams = list(fps) numjp = len(self.initvals) # make a separate list of the joint fitted parameters jointfitparams = fitparams[:numjp] del fitparams[:numjp] for model in self.models: joint_params = self.jointparams[model] margs = lstsqargs[:model.n_inputs + 1] del lstsqargs[:model.n_inputs + 1] # separate each model separately fitted parameters numfp = len(model._parameters) - len(joint_params) mfparams = fitparams[:numfp] del fitparams[:numfp] # recreate the model parameters mparams = [] param_metrics = model._param_metrics for param_name in model.param_names: if param_name in joint_params: index = joint_params.index(param_name) # should do this with slices in case the # parameter is not a number mparams.extend([jointfitparams[index]]) else: slice_ = param_metrics[param_name]['slice'] plen = slice_.stop - slice_.start mparams.extend(mfparams[:plen]) del mfparams[:plen] modelfit = model.evaluate(margs[:-1], *mparams) fitted.extend(modelfit - margs[-1]) return np.ravel(fitted) def _verify_input(self): if len(self.models) <= 1: raise TypeError(f"Expected >1 models, {len(self.models)} is given") if len(self.jointparams.keys()) < 2: raise TypeError("At least two parameters are expected, " f"{len(self.jointparams.keys())} is given") for j in self.jointparams.keys(): if len(self.jointparams[j]) != len(self.initvals): raise TypeError(f"{len(self.jointparams[j])} parameter(s) " f"provided but {len(self.initvals)} expected") def __call__(self, *args): """ Fit data to these models keeping some of the parameters common to the two models. 
""" from scipy import optimize if len(args) != reduce(lambda x, y: x + 1 + y + 1, self.modeldims): raise ValueError(f"Expected {reduce(lambda x, y: x + 1 + y + 1, self.modeldims)} " f"coordinates in args but {len(args)} provided") self.fitparams[:], _ = optimize.leastsq(self.objective_function, self.fitparams, args=args) fparams = self.fitparams[:] numjp = len(self.initvals) # make a separate list of the joint fitted parameters jointfitparams = fparams[:numjp] del fparams[:numjp] for model in self.models: # extract each model's fitted parameters joint_params = self.jointparams[model] numfp = len(model._parameters) - len(joint_params) mfparams = fparams[:numfp] del fparams[:numfp] # recreate the model parameters mparams = [] param_metrics = model._param_metrics for param_name in model.param_names: if param_name in joint_params: index = joint_params.index(param_name) # should do this with slices in case the parameter # is not a number mparams.extend([jointfitparams[index]]) else: slice_ = param_metrics[param_name]['slice'] plen = slice_.stop - slice_.start mparams.extend(mfparams[:plen]) del mfparams[:plen] model.parameters = np.array(mparams) def _convert_input(x, y, z=None, n_models=1, model_set_axis=0): """Convert inputs to float arrays.""" x = np.asanyarray(x, dtype=float) y = np.asanyarray(y, dtype=float) if z is not None: z = np.asanyarray(z, dtype=float) data_ndim, data_shape = z.ndim, z.shape else: data_ndim, data_shape = y.ndim, y.shape # For compatibility with how the linear fitter code currently expects to # work, shift the dependent variable's axes to the expected locations if n_models > 1 or data_ndim > x.ndim: if (model_set_axis or 0) >= data_ndim: raise ValueError("model_set_axis out of range") if data_shape[model_set_axis] != n_models: raise ValueError( "Number of data sets (y or z array) is expected to equal " "the number of parameter sets" ) if z is None: # For a 1-D model the y coordinate's model-set-axis is expected to # be last, so that its first dimension is the same length as the x # coordinates. This is in line with the expectations of # numpy.linalg.lstsq: # https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html # That is, each model should be represented by a column. TODO: # Obviously this is a detail of np.linalg.lstsq and should be # handled specifically by any fitters that use it... y = np.rollaxis(y, model_set_axis, y.ndim) data_shape = y.shape[:-1] else: # Shape of z excluding model_set_axis data_shape = (z.shape[:model_set_axis] + z.shape[model_set_axis + 1:]) if z is None: if data_shape != x.shape: raise ValueError("x and y should have the same shape") farg = (x, y) else: if not (x.shape == y.shape == data_shape): raise ValueError("x, y and z should have the same shape") farg = (x, y, z) return farg # TODO: These utility functions are really particular to handling # bounds/tied/fixed constraints for scipy.optimize optimizers that do not # support them inherently; this needs to be reworked to be clear about this # distinction (and the fact that these are not necessarily applicable to any # arbitrary fitter--as evidenced for example by the fact that JointFitter has # its own versions of these) # TODO: Most of this code should be entirely rewritten; it should not be as # inefficient as it is. def fitter_to_model_params(model, fps, use_min_max_bounds=True): """ Constructs the full list of model parameters from the fitted and constrained parameters. 
Parameters ---------- model : The model being fit fps : The fit parameter values to be assigned use_min_max_bounds: bool If the set parameter bounds for model will be enforced on each parameter with bounds. Default: True """ _, fit_param_indices, _ = model_to_fit_params(model) has_tied = any(model.tied.values()) has_fixed = any(model.fixed.values()) has_bound = any(b != (None, None) for b in model.bounds.values()) parameters = model.parameters if not (has_tied or has_fixed or has_bound): # We can just assign directly model.parameters = fps return fit_param_indices = set(fit_param_indices) offset = 0 param_metrics = model._param_metrics for idx, name in enumerate(model.param_names): if idx not in fit_param_indices: continue slice_ = param_metrics[name]['slice'] shape = param_metrics[name]['shape'] # This is determining which range of fps (the fitted parameters) maps # to parameters of the model size = reduce(operator.mul, shape, 1) values = fps[offset:offset + size] # Check bounds constraints if model.bounds[name] != (None, None) and use_min_max_bounds: _min, _max = model.bounds[name] if _min is not None: values = np.fmax(values, _min) if _max is not None: values = np.fmin(values, _max) parameters[slice_] = values offset += size # Update model parameters before calling ``tied`` constraints. model._array_to_parameters() # This has to be done in a separate loop due to how tied parameters are # currently evaluated (the fitted parameters need to actually be *set* on # the model first, for use in evaluating the "tied" expression--it might be # better to change this at some point if has_tied: for idx, name in enumerate(model.param_names): if model.tied[name]: value = model.tied[name](model) slice_ = param_metrics[name]['slice'] # To handle multiple tied constraints, model parameters # need to be updated after each iteration. parameters[slice_] = value model._array_to_parameters() @deprecated('5.1', 'private method: _fitter_to_model_params has been made public now') def _fitter_to_model_params(model, fps): return fitter_to_model_params(model, fps) def model_to_fit_params(model): """ Convert a model instance's parameter array to an array that can be used with a fitter that doesn't natively support fixed or tied parameters. In particular, it removes fixed/tied parameters from the parameter array. These may be a subset of the model parameters, if some of them are held constant or tied. """ fitparam_indices = list(range(len(model.param_names))) model_params = model.parameters model_bounds = list(model.bounds.values()) if any(model.fixed.values()) or any(model.tied.values()): params = list(model_params) param_metrics = model._param_metrics for idx, name in list(enumerate(model.param_names))[::-1]: if model.fixed[name] or model.tied[name]: slice_ = param_metrics[name]['slice'] del params[slice_] del model_bounds[slice_] del fitparam_indices[idx] model_params = np.array(params) for idx, bound in enumerate(model_bounds): if bound[0] is None: lower = -np.inf else: lower = bound[0] if bound[1] is None: upper = np.inf else: upper = bound[1] model_bounds[idx] = (lower, upper) model_bounds = tuple(zip(*model_bounds)) return model_params, fitparam_indices, model_bounds @deprecated('5.1', 'private method: _model_to_fit_params has been made public now') def _model_to_fit_params(model): return model_to_fit_params(model) def _validate_constraints(supported_constraints, model): """Make sure model constraints are supported by the current fitter.""" message = 'Optimizer cannot handle {0} constraints.' 
    if (any(model.fixed.values()) and
            'fixed' not in supported_constraints):
        raise UnsupportedConstraintError(
            message.format('fixed parameter'))

    if any(model.tied.values()) and 'tied' not in supported_constraints:
        raise UnsupportedConstraintError(
            message.format('tied parameter'))

    if (any(tuple(b) != (None, None) for b in model.bounds.values()) and
            'bounds' not in supported_constraints):
        raise UnsupportedConstraintError(
            message.format('bound parameter'))

    if model.eqcons and 'eqcons' not in supported_constraints:
        raise UnsupportedConstraintError(message.format('equality'))

    if model.ineqcons and 'ineqcons' not in supported_constraints:
        raise UnsupportedConstraintError(message.format('inequality'))


def _validate_model(model, supported_constraints):
    """
    Check that model and fitter are compatible and return a copy of the
    model.
    """
    if not model.fittable:
        raise ValueError("Model does not appear to be fittable.")
    if model.linear:
        warnings.warn('Model is linear in parameters; '
                      'consider using linear fitting methods.', AstropyUserWarning)
    elif len(model) != 1:
        # for now only single data sets can be fitted
        raise ValueError("Non-linear fitters can only fit "
                         "one data set at a time.")
    _validate_constraints(supported_constraints, model)

    model_copy = model.copy()
    return model_copy


def populate_entry_points(entry_points):
    """
    This injects entry points into the `astropy.modeling.fitting` namespace.
    This provides a means of inserting a fitting routine without requiring it
    to be merged into astropy's core.

    Parameters
    ----------
    entry_points : list of `~importlib.metadata.EntryPoint`
        entry_points are objects which encapsulate importable objects and
        are defined on the installation of a package.

    Notes
    -----
    An explanation of entry points can be found `here
    <http://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins>`_
    """
    for entry_point in entry_points:
        name = entry_point.name
        try:
            entry_point = entry_point.load()
        except Exception as e:
            # This stops the fitting from choking if an entry_point produces an error.
            warnings.warn(AstropyUserWarning(
                f'{type(e).__name__} error occurred in entry point {name}.'))
        else:
            if not inspect.isclass(entry_point):
                warnings.warn(AstropyUserWarning(
                    f'Modeling entry point {name} expected to be a Class.'))
            else:
                if issubclass(entry_point, Fitter):
                    name = entry_point.__name__
                    globals()[name] = entry_point
                    __all__.append(name)
                else:
                    warnings.warn(AstropyUserWarning(
                        f"Modeling entry point {name} expected to extend "
                        "astropy.modeling.Fitter"))


def _populate_ep():
    # TODO: Exclusively use select when Python minversion is 3.10
    ep = entry_points()
    if hasattr(ep, 'select'):
        populate_entry_points(ep.select(group='astropy.modeling'))
    else:
        populate_entry_points(ep.get('astropy.modeling', []))


_populate_ep()
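
# A minimal sketch of how a third-party package could register a custom
# fitter through this mechanism (all names below are hypothetical). In the
# package's setup.cfg:
#
#     [options.entry_points]
#     astropy.modeling =
#         MyFitter = mypackage.fitting:MyFitter
#
# Provided ``mypackage.fitting.MyFitter`` subclasses `Fitter`, importing
# `astropy.modeling.fitting` will expose it as
# ``astropy.modeling.fitting.MyFitter``.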
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name

"""
This module defines classes that deal with parameters.

It is unlikely users will need to work with these classes directly,
unless they define their own models.
"""

import functools
import numbers
import operator

import numpy as np

from astropy.units import Quantity
from astropy.utils import isiterable

from .utils import array_repr_oneline, get_inputs_and_params

__all__ = ['Parameter', 'InputParameterError', 'ParameterError']


class ParameterError(Exception):
    """Generic exception class for all exceptions pertaining to Parameters."""


class InputParameterError(ValueError, ParameterError):
    """Used for incorrect input parameter values and definitions."""


class ParameterDefinitionError(ParameterError):
    """Exception in declaration of class-level Parameters."""


def _tofloat(value):
    """Convert a parameter to float or float array"""
    if isiterable(value):
        try:
            value = np.asanyarray(value, dtype=float)
        except (TypeError, ValueError):
            # catch arrays with strings or user errors like different
            # types of parameters in a parameter set
            raise InputParameterError(
                f"Parameter of {type(value)} could not be converted to float")
    elif isinstance(value, Quantity):
        # Quantities are fine as is
        pass
    elif isinstance(value, np.ndarray):
        # A scalar/dimensionless array
        value = float(value.item())
    elif isinstance(value, (numbers.Number, np.number)) and not isinstance(value, bool):
        value = float(value)
    elif isinstance(value, bool):
        raise InputParameterError(
            "Expected parameter to be of numerical type, not boolean")
    else:
        raise InputParameterError(
            f"Don't know how to convert parameter of {type(value)} to float")
    return value


# Helpers for implementing operator overloading on Parameter


def _binary_arithmetic_operation(op, reflected=False):
    @functools.wraps(op)
    def wrapper(self, val):
        if self.unit is not None:
            self_value = Quantity(self.value, self.unit)
        else:
            self_value = self.value

        if reflected:
            return op(val, self_value)
        else:
            return op(self_value, val)

    return wrapper


def _binary_comparison_operation(op):
    @functools.wraps(op)
    def wrapper(self, val):
        if self.unit is not None:
            self_value = Quantity(self.value, self.unit)
        else:
            self_value = self.value

        return op(self_value, val)

    return wrapper


def _unary_arithmetic_operation(op):
    @functools.wraps(op)
    def wrapper(self):
        if self.unit is not None:
            self_value = Quantity(self.value, self.unit)
        else:
            self_value = self.value

        return op(self_value)

    return wrapper


class Parameter:
    """
    Wraps individual parameters.

    Since 4.0 Parameters are no longer descriptors and are based on a new
    implementation of the Parameter class. Parameters now (as of 4.0) store
    values locally, instead of in the associated model as previously.

    This class represents a model's parameter (in a somewhat broad sense). It
    serves a number of purposes:

    1) A type to be recognized by models and treated specially at class
       initialization (i.e., if it is found that there is a class definition
       of a Parameter, the model initializer makes a copy at the instance
       level).

    2) Managing the handling of allowable parameter values and once defined,
       ensuring updates are consistent with the Parameter definition. This
       includes the optional use of units and quantities as well as
       transforming values to an internally consistent representation (e.g.,
       from degrees to radians through the use of getters and setters).
    3) Holding attributes of parameters relevant to fitting, such as whether
       the parameter may be varied in fitting, or whether there are
       constraints that must be satisfied.

    See :ref:`astropy:modeling-parameters` for more details.

    Parameters
    ----------
    name : str
        parameter name

        .. warning::

            The fact that `Parameter` accepts ``name`` as an argument is an
            implementation detail, and should not be used directly. When
            defining a new `Model` class, parameter names are always
            automatically defined by the class attribute they're assigned to.
    description : str
        parameter description
    default : float or array
        default value to use for this parameter
    unit : `~astropy.units.Unit`
        if specified, the parameter will be in these units, and when the
        parameter is updated in the future, it should be set to a
        :class:`~astropy.units.Quantity` that has equivalent units.
    getter : callable
        a function that wraps the raw (internal) value of the parameter
        when returning the value through the parameter proxy (e.g., a
        parameter may be stored internally as radians but returned to the
        user as degrees)
    setter : callable
        a function that wraps any values assigned to this parameter; should
        be the inverse of getter
    fixed : bool
        if True the parameter is not varied during fitting
    tied : callable or False
        if callable is supplied it provides a way to link the value of this
        parameter to another parameter (or some other arbitrary function)
    min : float
        the lower bound of a parameter
    max : float
        the upper bound of a parameter
    bounds : tuple
        specify min and max as a single tuple--bounds may not be specified
        simultaneously with min or max
    """

    constraints = ('fixed', 'tied', 'bounds')
    """
    Types of constraints a parameter can have. Excludes 'min' and 'max'
    which are just aliases for the first and second elements of the 'bounds'
    constraint (which is represented as a 2-tuple). 'prior' and 'posterior'
    are available for use by user fitters but are not used by any built-in
    fitters as of this writing.
    """

    def __init__(self, name='', description='', default=None, unit=None,
                 getter=None, setter=None, fixed=False, tied=False, min=None,
                 max=None, bounds=None, prior=None, posterior=None):
        super().__init__()

        self._model = None
        self._model_required = False
        self._setter = self._create_value_wrapper(setter, None)
        self._getter = self._create_value_wrapper(getter, None)
        self._name = name
        self.__doc__ = self._description = description.strip()

        # We only need to perform this check on unbound parameters
        if isinstance(default, Quantity):
            if unit is not None and not unit.is_equivalent(default.unit):
                raise ParameterDefinitionError(
                    f"parameter default {default} does not have units equivalent to "
                    f"the required unit {unit}")
            unit = default.unit
            default = default.value

        self._default = default
        self._unit = unit

        # Internal units correspond to raw_units held by the model in the
        # previous implementation. The private _getter and _setter methods
        # use this to convert to and from the public unit defined for the
        # parameter.
        self._internal_unit = None
        if not self._model_required:
            if self._default is not None:
                self.value = self._default
            else:
                self._value = None

        # NOTE: These are *default* constraints--on model instances constraints
        # are taken from the model if set, otherwise the defaults set here are
        # used
        if bounds is not None:
            if min is not None or max is not None:
                raise ValueError(
                    "bounds may not be specified simultaneously with min or "
                    f"max when instantiating Parameter {name}")
        else:
            bounds = (min, max)

        self._fixed = fixed
        self._tied = tied
        self._bounds = bounds
        self._order = None

        self._validator = None
        self._prior = prior
        self._posterior = posterior

        self._std = None

    def __set_name__(self, owner, name):
        self._name = name

    def __len__(self):
        val = self.value
        if val.shape == ():
            return 1
        else:
            return val.shape[0]

    def __getitem__(self, key):
        value = self.value
        if len(value.shape) == 0:
            # Wrap the value in a list so that getitem can work for sensible
            # indices like [0] and [-1]
            value = [value]
        return value[key]

    def __setitem__(self, key, value):
        # Get the existing value and check whether it even makes sense to
        # apply this index
        oldvalue = self.value

        if isinstance(key, slice):
            if len(oldvalue[key]) == 0:
                raise InputParameterError(
                    "Slice assignment outside the parameter dimensions for "
                    f"'{self.name}'")
            for idx, val in zip(range(*key.indices(len(self))), value):
                self.__setitem__(idx, val)
        else:
            try:
                oldvalue[key] = value
            except IndexError:
                raise InputParameterError(
                    f"Input dimension {key} invalid for {self.name!r} parameter "
                    f"with dimension {oldvalue.shape[0]}")

    def __repr__(self):
        args = f"'{self._name}'"
        args += f', value={self.value}'

        if self.unit is not None:
            args += f', unit={self.unit}'

        for cons in self.constraints:
            val = getattr(self, cons)
            if val not in (None, False, (None, None)):
                # Maybe non-obvious, but False is the default for the fixed
                # and tied constraints
                args += f', {cons}={val}'

        return f"{self.__class__.__name__}({args})"

    @property
    def name(self):
        """Parameter name"""
        return self._name

    @property
    def default(self):
        """Parameter default value"""
        return self._default

    @property
    def value(self):
        """The unadorned value proxied by this parameter."""
        if self._getter is None and self._setter is None:
            return np.float64(self._value)
        else:
            # This new implementation uses the names of internal_unit
            # in place of raw_unit used previously. The contrast between
            # internal values and units is that between the public
            # units that the parameter advertises to what it actually
            # uses internally.
            if self.internal_unit:
                return np.float64(self._getter(self._internal_value,
                                               self.internal_unit, self.unit).value)
            elif self._getter:
                return np.float64(self._getter(self._internal_value))
            elif self._setter:
                return np.float64(self._internal_value)

    @value.setter
    def value(self, value):
        if isinstance(value, Quantity):
            raise TypeError("The .value property on parameters should be set"
                            " to unitless values, not Quantity objects. To set"
                            " a parameter to a quantity simply set the "
                            "parameter directly without using .value")
        if self._setter is None:
            self._value = np.array(value, dtype=np.float64)
        else:
            self._internal_value = np.array(self._setter(value),
                                            dtype=np.float64)

    @property
    def unit(self):
        """
        The unit attached to this parameter, if any.

        On unbound parameters (i.e. parameters accessed through the
        model class, rather than a model instance) this is the required/
        default unit for the parameter.
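
        For instance (illustrative)::

            >>> from astropy import units as u
            >>> from astropy.modeling import models
            >>> g = models.Gaussian1D(amplitude=1 * u.Jy, mean=2 * u.um,
            ...                       stddev=1 * u.um)
            >>> g.mean.unit
            Unit("micron")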
""" return self._unit @unit.setter def unit(self, unit): if self.unit is None: raise ValueError('Cannot attach units to parameters that were ' 'not initially specified with units') else: raise ValueError('Cannot change the unit attribute directly, ' 'instead change the parameter to a new quantity') def _set_unit(self, unit, force=False): if force: self._unit = unit else: self.unit = unit @property def internal_unit(self): """ Return the internal unit the parameter uses for the internal value stored """ return self._internal_unit @internal_unit.setter def internal_unit(self, internal_unit): """ Set the unit the parameter will convert the supplied value to the representation used internally. """ self._internal_unit = internal_unit @property def quantity(self): """ This parameter, as a :class:`~astropy.units.Quantity` instance. """ if self.unit is None: return None return self.value * self.unit @quantity.setter def quantity(self, quantity): if not isinstance(quantity, Quantity): raise TypeError("The .quantity attribute should be set " "to a Quantity object") self.value = quantity.value self._unit = quantity.unit @property def shape(self): """The shape of this parameter's value array.""" if self._setter is None: return self._value.shape return self._internal_value.shape @shape.setter def shape(self, value): if isinstance(self.value, np.generic): if value not in ((), (1,)): raise ValueError("Cannot assign this shape to a scalar quantity") else: self.value.shape = value @property def size(self): """The size of this parameter's value array.""" return np.size(self.value) @property def std(self): """Standard deviation, if available from fit.""" return self._std @std.setter def std(self, value): self._std = value @property def prior(self): return self._prior @prior.setter def prior(self, val): self._prior = val @property def posterior(self): return self._posterior @posterior.setter def posterior(self, val): self._posterior = val @property def fixed(self): """ Boolean indicating if the parameter is kept fixed during fitting. """ return self._fixed @fixed.setter def fixed(self, value): """ Fix a parameter. """ if not isinstance(value, bool): raise ValueError("Value must be boolean") self._fixed = value @property def tied(self): """ Indicates that this parameter is linked to another one. A callable which provides the relationship of the two parameters. 
""" return self._tied @tied.setter def tied(self, value): """Tie a parameter""" if not callable(value) and value not in (False, None): raise TypeError("Tied must be a callable or set to False or None") self._tied = value @property def bounds(self): """The minimum and maximum values of a parameter as a tuple""" return self._bounds @bounds.setter def bounds(self, value): """Set the minimum and maximum values of a parameter from a tuple""" _min, _max = value if _min is not None: if not isinstance(_min, (numbers.Number, Quantity)): raise TypeError("Min value must be a number or a Quantity") if isinstance(_min, Quantity): _min = float(_min.value) else: _min = float(_min) if _max is not None: if not isinstance(_max, (numbers.Number, Quantity)): raise TypeError("Max value must be a number or a Quantity") if isinstance(_max, Quantity): _max = float(_max.value) else: _max = float(_max) self._bounds = (_min, _max) @property def min(self): """A value used as a lower bound when fitting a parameter""" return self.bounds[0] @min.setter def min(self, value): """Set a minimum value of a parameter""" self.bounds = (value, self.max) @property def max(self): """A value used as an upper bound when fitting a parameter""" return self.bounds[1] @max.setter def max(self, value): """Set a maximum value of a parameter.""" self.bounds = (self.min, value) @property def validator(self): """ Used as a decorator to set the validator method for a `Parameter`. The validator method validates any value set for that parameter. It takes two arguments--``self``, which refers to the `Model` instance (remember, this is a method defined on a `Model`), and the value being set for this parameter. The validator method's return value is ignored, but it may raise an exception if the value set on the parameter is invalid (typically an `InputParameterError` should be raised, though this is not currently a requirement). """ def validator(func, self=self): if callable(func): self._validator = func return self else: raise ValueError("This decorator method expects a callable.\n" "The use of this method as a direct validator is\n" "deprecated; use the new validate method instead\n") return validator def validate(self, value): """ Run the validator on this parameter""" if self._validator is not None and self._model is not None: self._validator(self._model, value) def copy(self, name=None, description=None, default=None, unit=None, getter=None, setter=None, fixed=False, tied=False, min=None, max=None, bounds=None, prior=None, posterior=None): """ Make a copy of this `Parameter`, overriding any of its core attributes in the process (or an exact copy). The arguments to this method are the same as those for the `Parameter` initializer. This simply returns a new `Parameter` instance with any or all of the attributes overridden, and so returns the equivalent of: .. code:: python Parameter(self.name, self.description, ...) 
""" kwargs = locals().copy() del kwargs['self'] for key, value in kwargs.items(): if value is None: # Annoying special cases for min/max where are just aliases for # the components of bounds if key in ('min', 'max'): continue else: if hasattr(self, key): value = getattr(self, key) elif hasattr(self, '_' + key): value = getattr(self, '_' + key) kwargs[key] = value return self.__class__(**kwargs) @property def model(self): """ Return the model this parameter is associated with.""" return self._model @model.setter def model(self, value): self._model = value self._setter = self._create_value_wrapper(self._setter, value) self._getter = self._create_value_wrapper(self._getter, value) if self._model_required: if self._default is not None: self.value = self._default else: self._value = None @property def _raw_value(self): """ Currently for internal use only. Like Parameter.value but does not pass the result through Parameter.getter. By design this should only be used from bound parameters. This will probably be removed are retweaked at some point in the process of rethinking how parameter values are stored/updated. """ if self._setter: return self._internal_value return self.value def _create_value_wrapper(self, wrapper, model): """Wraps a getter/setter function to support optionally passing in a reference to the model object as the second argument. If a model is tied to this parameter and its getter/setter supports a second argument then this creates a partial function using the model instance as the second argument. """ if isinstance(wrapper, np.ufunc): if wrapper.nin != 1: raise TypeError("A numpy.ufunc used for Parameter " "getter/setter may only take one input " "argument") elif wrapper is None: # Just allow non-wrappers to fall through silently, for convenience return None else: inputs, _ = get_inputs_and_params(wrapper) nargs = len(inputs) if nargs == 1: pass elif nargs == 2: self._model_required = True if model is not None: # Don't make a partial function unless we're tied to a # specific model instance model_arg = inputs[1].name wrapper = functools.partial(wrapper, **{model_arg: model}) else: raise TypeError("Parameter getter/setter must be a function " "of either one or two arguments") return wrapper def __array__(self, dtype=None): # Make np.asarray(self) work a little more straightforwardly arr = np.asarray(self.value, dtype=dtype) if self.unit is not None: arr = Quantity(arr, self.unit, copy=False) return arr def __bool__(self): return bool(np.all(self.value)) __add__ = _binary_arithmetic_operation(operator.add) __radd__ = _binary_arithmetic_operation(operator.add, reflected=True) __sub__ = _binary_arithmetic_operation(operator.sub) __rsub__ = _binary_arithmetic_operation(operator.sub, reflected=True) __mul__ = _binary_arithmetic_operation(operator.mul) __rmul__ = _binary_arithmetic_operation(operator.mul, reflected=True) __pow__ = _binary_arithmetic_operation(operator.pow) __rpow__ = _binary_arithmetic_operation(operator.pow, reflected=True) __truediv__ = _binary_arithmetic_operation(operator.truediv) __rtruediv__ = _binary_arithmetic_operation(operator.truediv, reflected=True) __eq__ = _binary_comparison_operation(operator.eq) __ne__ = _binary_comparison_operation(operator.ne) __lt__ = _binary_comparison_operation(operator.lt) __gt__ = _binary_comparison_operation(operator.gt) __le__ = _binary_comparison_operation(operator.le) __ge__ = _binary_comparison_operation(operator.ge) __neg__ = _unary_arithmetic_operation(operator.neg) __abs__ = _unary_arithmetic_operation(operator.abs) def 
param_repr_oneline(param): """ Like array_repr_oneline but works on `Parameter` objects and supports rendering parameters with units like quantities. """ out = array_repr_oneline(param.value) if param.unit is not None: out = f'{out} {param.unit!s}' return out
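
# A small usage sketch (illustrative)::
#
#     >>> from astropy.modeling import models
#     >>> g = models.Gaussian1D(amplitude=2.5, mean=0.0, stddev=1.0)
#     >>> param_repr_oneline(g.amplitude)
#     '2.5'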
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines base classes for all models. The base class of all
models is `~astropy.modeling.Model`. `~astropy.modeling.FittableModel` is
the base class for all fittable models. Fittable models can be linear or
nonlinear in a regression analysis sense.

All models provide a `__call__` method which performs the transformation in
a purely mathematical way, i.e. the models are unitless. Model instances can
represent either a single model, or a "model set" representing multiple copies
of the same type of model, but with potentially different values of the
parameters in each model making up the set.
"""
# pylint: disable=invalid-name, protected-access, redefined-outer-name

import abc
import copy
import functools
import inspect
import itertools
import operator
import types
from collections import defaultdict, deque
from inspect import signature
from itertools import chain

import numpy as np

from astropy.nddata.utils import add_array, extract_array
from astropy.table import Table
from astropy.units import Quantity, UnitsError, dimensionless_unscaled
from astropy.units.utils import quantity_asanyarray
from astropy.utils import (
    IncompatibleShapeError, check_broadcast, find_current_module, indent,
    isiterable, metadata, sharedmethod)
from astropy.utils.codegen import make_function_with_signature

from .bounding_box import CompoundBoundingBox, ModelBoundingBox
from .parameters import InputParameterError, Parameter, _tofloat, param_repr_oneline
from .utils import (
    _combine_equivalency_dict, _ConstraintsDict, _SpecialOperatorsDict,
    combine_labels, get_inputs_and_params, make_binary_operator_eval)

__all__ = ['Model', 'FittableModel', 'Fittable1DModel', 'Fittable2DModel',
           'CompoundModel', 'fix_inputs', 'custom_model', 'ModelDefinitionError',
           'bind_bounding_box', 'bind_compound_bounding_box']


def _model_oper(oper, **kwargs):
    """
    Returns a function that evaluates a given Python arithmetic operator
    between two models. The operator should be given as a string, like ``'+'``
    or ``'**'``.
    """
    return lambda left, right: CompoundModel(oper, left, right, **kwargs)


class ModelDefinitionError(TypeError):
    """Used for incorrect model definitions."""


class _ModelMeta(abc.ABCMeta):
    """
    Metaclass for Model.

    Currently just handles auto-generating the param_names list based on
    Parameter descriptors declared at the class-level of Model subclasses.
    """

    _is_dynamic = False
    """
    This flag signifies whether this class was created in the "normal" way,
    with a class statement in the body of a module, as opposed to a call to
    `type` or some other metaclass constructor, such that the resulting class
    does not belong to a specific module. This is important for pickling of
    dynamic classes.

    This flag is always forced to False for new classes, so code that creates
    dynamic classes should manually set it to True on those classes when
    creating them.
""" # Default empty dict for _parameters_, which will be empty on model # classes that don't have any Parameters def __new__(mcls, name, bases, members, **kwds): # See the docstring for _is_dynamic above if '_is_dynamic' not in members: members['_is_dynamic'] = mcls._is_dynamic opermethods = [ ('__add__', _model_oper('+')), ('__sub__', _model_oper('-')), ('__mul__', _model_oper('*')), ('__truediv__', _model_oper('/')), ('__pow__', _model_oper('**')), ('__or__', _model_oper('|')), ('__and__', _model_oper('&')), ('_fix_inputs', _model_oper('fix_inputs')) ] members['_parameters_'] = {k: v for k, v in members.items() if isinstance(v, Parameter)} for opermethod, opercall in opermethods: members[opermethod] = opercall cls = super().__new__(mcls, name, bases, members, **kwds) param_names = list(members['_parameters_']) # Need to walk each base MRO to collect all parameter names for base in bases: for tbase in base.__mro__: if issubclass(tbase, Model): # Preserve order of definitions param_names = list(tbase._parameters_) + param_names # Remove duplicates (arising from redefinition in subclass). param_names = list(dict.fromkeys(param_names)) if cls._parameters_: if hasattr(cls, '_param_names'): # Slight kludge to support compound models, where # cls.param_names is a property; could be improved with a # little refactoring but fine for now cls._param_names = tuple(param_names) else: cls.param_names = tuple(param_names) return cls def __init__(cls, name, bases, members, **kwds): super(_ModelMeta, cls).__init__(name, bases, members, **kwds) cls._create_inverse_property(members) cls._create_bounding_box_property(members) pdict = {} for base in bases: for tbase in base.__mro__: if issubclass(tbase, Model): for parname, val in cls._parameters_.items(): pdict[parname] = val cls._handle_special_methods(members, pdict) def __repr__(cls): """ Custom repr for Model subclasses. """ return cls._format_cls_repr() def _repr_pretty_(cls, p, cycle): """ Repr for IPython's pretty printer. By default IPython "pretty prints" classes, so we need to implement this so that IPython displays the custom repr for Models. """ p.text(repr(cls)) def __reduce__(cls): if not cls._is_dynamic: # Just return a string specifying where the class can be imported # from return cls.__name__ members = dict(cls.__dict__) # Delete any ABC-related attributes--these will be restored when # the class is reconstructed: for key in list(members): if key.startswith('_abc_'): del members[key] # Delete custom __init__ and __call__ if they exist: for key in ('__init__', '__call__'): if key in members: del members[key] return (type(cls), (cls.__name__, cls.__bases__, members)) @property def name(cls): """ The name of this model class--equivalent to ``cls.__name__``. This attribute is provided for symmetry with the `Model.name` attribute of model instances. """ return cls.__name__ @property def _is_concrete(cls): """ A class-level property that determines whether the class is a concrete implementation of a Model--i.e. it is not some abstract base class or internal implementation detail (i.e. begins with '_'). """ return not (cls.__name__.startswith('_') or inspect.isabstract(cls)) def rename(cls, name=None, inputs=None, outputs=None): """ Creates a copy of this model class with a new name, inputs or outputs. The new class is technically a subclass of the original class, so that instance and type checks will still work. 
        For example::

            >>> from astropy.modeling.models import Rotation2D
            >>> SkyRotation = Rotation2D.rename('SkyRotation')
            >>> SkyRotation
            <class 'astropy.modeling.core.SkyRotation'>
            Name: SkyRotation (Rotation2D)
            N_inputs: 2
            N_outputs: 2
            Fittable parameters: ('angle',)
            >>> issubclass(SkyRotation, Rotation2D)
            True
            >>> r = SkyRotation(90)
            >>> isinstance(r, Rotation2D)
            True
        """
        mod = find_current_module(2)
        if mod:
            modname = mod.__name__
        else:
            modname = '__main__'

        if name is None:
            name = cls.name
        if inputs is None:
            inputs = cls.inputs
        else:
            if not isinstance(inputs, tuple):
                raise TypeError("Expected 'inputs' to be a tuple of strings.")
            elif len(inputs) != len(cls.inputs):
                raise ValueError(f'{cls.name} expects {len(cls.inputs)} inputs')
        if outputs is None:
            outputs = cls.outputs
        else:
            if not isinstance(outputs, tuple):
                raise TypeError("Expected 'outputs' to be a tuple of strings.")
            elif len(outputs) != len(cls.outputs):
                raise ValueError(f'{cls.name} expects {len(cls.outputs)} outputs')

        new_cls = type(name, (cls,), {"inputs": inputs, "outputs": outputs})
        new_cls.__module__ = modname
        new_cls.__qualname__ = name

        return new_cls

    def _create_inverse_property(cls, members):
        inverse = members.get('inverse')
        if inverse is None or cls.__bases__[0] is object:
            # The latter clause is to prevent the below code from running on
            # the Model base class, which implements the default getter and
            # setter for .inverse
            return

        if isinstance(inverse, property):
            # We allow the @property decorator to be omitted entirely from
            # the class definition, though its use should be encouraged for
            # clarity
            inverse = inverse.fget

        # Store the inverse getter internally, then delete the given .inverse
        # attribute so that cls.inverse resolves to Model.inverse instead
        cls._inverse = inverse
        del cls.inverse

    def _create_bounding_box_property(cls, members):
        """
        Takes any bounding_box defined on a concrete Model subclass (either
        as a fixed tuple or a property or method) and wraps it in the generic
        getter/setter interface for the bounding_box attribute.
        """
        # TODO: Much of this is verbatim from _create_inverse_property--I feel
        # like there could be a way to generify properties that work this way,
        # but for the time being that would probably only confuse things more.
        bounding_box = members.get('bounding_box')
        if bounding_box is None or cls.__bases__[0] is object:
            return

        if isinstance(bounding_box, property):
            bounding_box = bounding_box.fget

        if not callable(bounding_box):
            # See if it's a hard-coded bounding_box (as a sequence) and
            # normalize it
            try:
                bounding_box = ModelBoundingBox.validate(cls, bounding_box,
                                                         _preserve_ignore=True)
            except ValueError as exc:
                raise ModelDefinitionError(exc.args[0])
        else:
            sig = signature(bounding_box)
            # May be a method that only takes 'self' as an argument (like a
            # property, but the @property decorator was forgotten)
            #
            # However, if the method takes additional arguments then this is a
            # parameterized bounding box and should be callable
            if len(sig.parameters) > 1:
                bounding_box = cls._create_bounding_box_subclass(bounding_box,
                                                                 sig)

        # See the Model.bounding_box getter definition for how this attribute
        # is used
        cls._bounding_box = bounding_box
        del cls.bounding_box

    def _create_bounding_box_subclass(cls, func, sig):
        """
        For Models that take optional arguments for defining their bounding
        box, we create a subclass of ModelBoundingBox with a ``__call__``
        method that supports those additional arguments.
        Takes the function's Signature as an argument since that is already
        computed in _create_bounding_box_property, so no need to duplicate
        that effort.
        """
        # TODO: Might be convenient if calling the bounding box also
        # automatically sets the _user_bounding_box. So that
        #
        #     >>> model.bounding_box(arg=1)
        #
        # in addition to returning the computed bbox, also sets it, so that
        # it's a shortcut for
        #
        #     >>> model.bounding_box = model.bounding_box(arg=1)
        #
        # Not sure if that would be non-obvious / confusing though...

        def __call__(self, **kwargs):
            return func(self._model, **kwargs)

        kwargs = []
        for idx, param in enumerate(sig.parameters.values()):
            if idx == 0:
                # Presumed to be a 'self' argument
                continue

            if param.default is param.empty:
                raise ModelDefinitionError(
                    f"The bounding_box method for {cls.name} is not correctly "
                    "defined: If defined as a method all arguments to that "
                    "method (besides self) must be keyword arguments with "
                    "default values that can be used to compute a default "
                    "bounding box.")

            kwargs.append((param.name, param.default))

        __call__.__signature__ = sig

        return type(f"{cls.name}ModelBoundingBox", (ModelBoundingBox,),
                    {'__call__': __call__})

    def _handle_special_methods(cls, members, pdict):

        # Handle init creation from inputs
        def update_wrapper(wrapper, cls):
            # Set up the new __call__'s metadata attributes as though it were
            # manually defined in the class definition
            # A bit like functools.update_wrapper but uses the class instead
            # of the wrapped function
            wrapper.__module__ = cls.__module__
            wrapper.__doc__ = getattr(cls, wrapper.__name__).__doc__
            if hasattr(cls, '__qualname__'):
                wrapper.__qualname__ = f'{cls.__qualname__}.{wrapper.__name__}'

        if ('__call__' not in members and 'n_inputs' in members and
                isinstance(members['n_inputs'], int) and members['n_inputs'] > 0):

            # Don't create a custom __call__ for classes that already have one
            # explicitly defined (this includes the Model base class, and any
            # other classes that manually override __call__).

            def __call__(self, *inputs, **kwargs):
                """Evaluate this model on the supplied inputs."""
                return super(cls, self).__call__(*inputs, **kwargs)

            # When called, models can take two optional keyword arguments:
            #
            # * model_set_axis, which indicates (for multi-dimensional input)
            #   which axis is used to indicate different models
            #
            # * equivalencies, a dictionary of equivalencies to be applied to
            #   the input values, where each key should correspond to one of
            #   the inputs.
            #
            # The following code creates the __call__ function with these
            # two keyword arguments.

            args = ('self',)
            kwargs = dict([('model_set_axis', None),
                           ('with_bounding_box', False),
                           ('fill_value', np.nan),
                           ('equivalencies', None),
                           ('inputs_map', None)])

            new_call = make_function_with_signature(
                __call__, args, kwargs, varargs='inputs', varkwargs='new_inputs')

            # The following makes it look like __call__
            # was defined in the class
            update_wrapper(new_call, cls)

            cls.__call__ = new_call

        if ('__init__' not in members and not inspect.isabstract(cls) and
                cls._parameters_):
            # Build list of all parameters including inherited ones

            # If *all* the parameters have default values we can make them
            # keyword arguments; otherwise they must all be positional
            # arguments
            if all(p.default is not None for p in pdict.values()):
                args = ('self',)
                kwargs = []
                for param_name, param_val in pdict.items():
                    default = param_val.default
                    unit = param_val.unit
                    # If the unit was specified in the parameter but the
                    # default is not a Quantity, attach the unit to the
                    # default.
if unit is not None: default = Quantity(default, unit, copy=False) kwargs.append((param_name, default)) else: args = ('self',) + tuple(pdict.keys()) kwargs = {} def __init__(self, *params, **kwargs): return super(cls, self).__init__(*params, **kwargs) new_init = make_function_with_signature( __init__, args, kwargs, varkwargs='kwargs') update_wrapper(new_init, cls) cls.__init__ = new_init # *** Arithmetic operators for creating compound models *** __add__ = _model_oper('+') __sub__ = _model_oper('-') __mul__ = _model_oper('*') __truediv__ = _model_oper('/') __pow__ = _model_oper('**') __or__ = _model_oper('|') __and__ = _model_oper('&') _fix_inputs = _model_oper('fix_inputs') # *** Other utilities *** def _format_cls_repr(cls, keywords=[]): """ Internal implementation of ``__repr__``. This is separated out for ease of use by subclasses that wish to override the default ``__repr__`` while keeping the same basic formatting. """ # For the sake of familiarity start the output with the standard class # __repr__ parts = [super().__repr__()] if not cls._is_concrete: return parts[0] def format_inheritance(cls): bases = [] for base in cls.mro()[1:]: if not issubclass(base, Model): continue elif (inspect.isabstract(base) or base.__name__.startswith('_')): break bases.append(base.name) if bases: return f"{cls.name} ({' -> '.join(bases)})" return cls.name try: default_keywords = [ ('Name', format_inheritance(cls)), ('N_inputs', cls.n_inputs), ('N_outputs', cls.n_outputs), ] if cls.param_names: default_keywords.append(('Fittable parameters', cls.param_names)) for keyword, value in default_keywords + keywords: if value is not None: parts.append(f'{keyword}: {value}') return '\n'.join(parts) except Exception: # If any of the above formatting fails fall back on the basic repr # (this is particularly useful in debugging) return parts[0] class Model(metaclass=_ModelMeta): """ Base class for all models. This is an abstract class and should not be instantiated directly. The following initialization arguments apply to the majority of Model subclasses by default (exceptions include specialized utility models like `~astropy.modeling.mappings.Mapping`). Parametric models take all their parameters as arguments, followed by any of the following optional keyword arguments: Parameters ---------- name : str, optional A human-friendly name associated with this model instance (particularly useful for identifying the individual components of a compound model). meta : dict, optional An optional dict of user-defined metadata to attach to this model. How this is used and interpreted is up to the user or individual use case. n_models : int, optional If given an integer greater than 1, a *model set* is instantiated instead of a single model. This affects how the parameter arguments are interpreted. In this case each parameter must be given as a list or array--elements of this array are taken along the first axis (or ``model_set_axis`` if specified), such that the Nth element is the value of that parameter for the Nth model in the set. See the section on model sets in the documentation for more details. model_set_axis : int, optional This argument only applies when creating a model set (i.e. ``n_models > 1``). It changes how parameter values are interpreted. Normally the first axis of each input parameter array (properly the 0th axis) is taken as the axis corresponding to the model sets. However, any axis of an input array may be taken as this "model set axis". 
        This accepts negative integers as well--for example use
        ``model_set_axis=-1`` if the last (most rapidly changing) axis should
        be associated with the model sets. Also, ``model_set_axis=False`` can
        be used to tell that a given input should be used to evaluate all the
        models in the model set.

    fixed : dict, optional
        Dictionary ``{parameter_name: bool}`` setting the fixed constraint
        for one or more parameters. `True` means the parameter is held fixed
        during fitting and is prevented from updates once an instance of the
        model has been created.

        Alternatively the `~astropy.modeling.Parameter.fixed` property of a
        parameter may be used to lock or unlock individual parameters.

    tied : dict, optional
        Dictionary ``{parameter_name: callable}`` of parameters which are
        linked to some other parameter. The dictionary values are callables
        providing the linking relationship.

        Alternatively the `~astropy.modeling.Parameter.tied` property of a
        parameter may be used to set the ``tied`` constraint on individual
        parameters.

    bounds : dict, optional
        A dictionary ``{parameter_name: value}`` of lower and upper bounds of
        parameters. Keys are parameter names. Values are a list or a tuple
        of length 2 giving the desired range for the parameter.

        Alternatively the `~astropy.modeling.Parameter.min` and
        `~astropy.modeling.Parameter.max` or
        `~astropy.modeling.Parameter.bounds` properties of a parameter may
        be used to set bounds on individual parameters.

    eqcons : list, optional
        List of functions of length n such that
        ``eqcons[j](x0, *args) == 0.0`` in a successfully optimized problem.

    ineqcons : list, optional
        List of functions of length n such that
        ``ieqcons[j](x0, *args) >= 0.0`` in a successfully optimized problem.

    Examples
    --------
    >>> from astropy.modeling import models
    >>> def tie_center(model):
    ...         mean = 50 * model.stddev
    ...         return mean
    >>> tied_parameters = {'mean': tie_center}

    Specify that ``'mean'`` is a tied parameter in one of two ways:

    >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
    ...                        tied=tied_parameters)

    or

    >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
    >>> g1.mean.tied
    False
    >>> g1.mean.tied = tie_center
    >>> g1.mean.tied
    <function tie_center at 0x...>

    Fixed parameters:

    >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
    ...                        fixed={'stddev': True})
    >>> g1.stddev.fixed
    True

    or

    >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
    >>> g1.stddev.fixed
    False
    >>> g1.stddev.fixed = True
    >>> g1.stddev.fixed
    True
    """

    parameter_constraints = Parameter.constraints
    """
    Primarily for informational purposes, these are the types of constraints
    that can be set on a model's parameters.
    """

    model_constraints = ('eqcons', 'ineqcons')
    """
    Primarily for informational purposes, these are the types of constraints
    that constrain model evaluation.
    """

    param_names = ()
    """
    Names of the parameters that describe models of this type.

    The parameters in this tuple are in the same order they should be passed
    in when initializing a model of a specific type. Some types of models,
    such as polynomial models, have a different number of parameters depending
    on some other property of the model, such as the degree.

    When defining a custom model class the value of this attribute is
    automatically set by the `~astropy.modeling.Parameter` attributes defined
    in the class body.
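
    For example (illustrative)::

        >>> from astropy.modeling import models
        >>> models.Gaussian1D.param_names
        ('amplitude', 'mean', 'stddev')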
""" n_inputs = 0 """The number of inputs.""" n_outputs = 0 """ The number of outputs.""" standard_broadcasting = True fittable = False linear = True _separable = None """ A boolean flag to indicate whether a model is separable.""" meta = metadata.MetaData() """A dict-like object to store optional information.""" # By default models either use their own inverse property or have no # inverse at all, but users may also assign a custom inverse to a model, # optionally; in that case it is of course up to the user to determine # whether their inverse is *actually* an inverse to the model they assign # it to. _inverse = None _user_inverse = None _bounding_box = None _user_bounding_box = None _has_inverse_bounding_box = False # Default n_models attribute, so that __len__ is still defined even when a # model hasn't completed initialization yet _n_models = 1 # New classes can set this as a boolean value. # It is converted to a dictionary mapping input name to a boolean value. _input_units_strict = False # Allow dimensionless input (and corresponding output). If this is True, # input values to evaluate will gain the units specified in input_units. If # this is a dictionary then it should map input name to a bool to allow # dimensionless numbers for that input. # Only has an effect if input_units is defined. _input_units_allow_dimensionless = False # Default equivalencies to apply to input values. If set, this should be a # dictionary where each key is a string that corresponds to one of the # model inputs. Only has an effect if input_units is defined. input_units_equivalencies = None # Covariance matrix can be set by fitter if available. # If cov_matrix is available, then std will set as well _cov_matrix = None _stds = None def __init_subclass__(cls, **kwargs): super().__init_subclass__() def __init__(self, *args, meta=None, name=None, **kwargs): super().__init__() self._default_inputs_outputs() if meta is not None: self.meta = meta self._name = name # add parameters to instance level by walking MRO list mro = self.__class__.__mro__ for cls in mro: if issubclass(cls, Model): for parname, val in cls._parameters_.items(): newpar = copy.deepcopy(val) newpar.model = self if parname not in self.__dict__: self.__dict__[parname] = newpar self._initialize_constraints(kwargs) kwargs = self._initialize_setters(kwargs) # Remaining keyword args are either parameter values or invalid # Parameter values must be passed in as keyword arguments in order to # distinguish them self._initialize_parameters(args, kwargs) self._initialize_slices() self._initialize_unit_support() def _default_inputs_outputs(self): if self.n_inputs == 1 and self.n_outputs == 1: self._inputs = ("x",) self._outputs = ("y",) elif self.n_inputs == 2 and self.n_outputs == 1: self._inputs = ("x", "y") self._outputs = ("z",) else: try: self._inputs = tuple("x" + str(idx) for idx in range(self.n_inputs)) self._outputs = tuple("x" + str(idx) for idx in range(self.n_outputs)) except TypeError: # self.n_inputs and self.n_outputs are properties # This is the case when subclasses of Model do not define # ``n_inputs``, ``n_outputs``, ``inputs`` or ``outputs``. self._inputs = () self._outputs = () def _initialize_setters(self, kwargs): """ This exists to inject defaults for settable properties for models originating from `custom_model`. 
""" if hasattr(self, '_settable_properties'): setters = {name: kwargs.pop(name, default) for name, default in self._settable_properties.items()} for name, value in setters.items(): setattr(self, name, value) return kwargs @property def inputs(self): return self._inputs @inputs.setter def inputs(self, val): if len(val) != self.n_inputs: raise ValueError(f"Expected {self.n_inputs} number of inputs, got {len(val)}.") self._inputs = val self._initialize_unit_support() @property def outputs(self): return self._outputs @outputs.setter def outputs(self, val): if len(val) != self.n_outputs: raise ValueError(f"Expected {self.n_outputs} number of outputs, got {len(val)}.") self._outputs = val @property def n_inputs(self): # TODO: remove the code in the ``if`` block when support # for models with ``inputs`` as class variables is removed. if hasattr(self.__class__, 'n_inputs') and isinstance(self.__class__.n_inputs, property): try: return len(self.__class__.inputs) except TypeError: try: return len(self.inputs) except AttributeError: return 0 return self.__class__.n_inputs @property def n_outputs(self): # TODO: remove the code in the ``if`` block when support # for models with ``outputs`` as class variables is removed. if hasattr(self.__class__, 'n_outputs') and isinstance(self.__class__.n_outputs, property): try: return len(self.__class__.outputs) except TypeError: try: return len(self.outputs) except AttributeError: return 0 return self.__class__.n_outputs def _calculate_separability_matrix(self): """ This is a hook which customises the behavior of modeling.separable. This allows complex subclasses to customise the separability matrix. If it returns `NotImplemented` the default behavior is used. """ return NotImplemented def _initialize_unit_support(self): """ Convert self._input_units_strict and self.input_units_allow_dimensionless to dictionaries mapping input name to a boolean value. """ if isinstance(self._input_units_strict, bool): self._input_units_strict = {key: self._input_units_strict for key in self.inputs} if isinstance(self._input_units_allow_dimensionless, bool): self._input_units_allow_dimensionless = {key: self._input_units_allow_dimensionless for key in self.inputs} @property def input_units_strict(self): """ Enforce strict units on inputs to evaluate. If this is set to True, input values to evaluate will be in the exact units specified by input_units. If the input quantities are convertible to input_units, they are converted. If this is a dictionary then it should map input name to a bool to set strict input units for that parameter. """ val = self._input_units_strict if isinstance(val, bool): return {key: val for key in self.inputs} return dict(zip(self.inputs, val.values())) @property def input_units_allow_dimensionless(self): """ Allow dimensionless input (and corresponding output). If this is True, input values to evaluate will gain the units specified in input_units. If this is a dictionary then it should map input name to a bool to allow dimensionless numbers for that input. Only has an effect if input_units is defined. """ val = self._input_units_allow_dimensionless if isinstance(val, bool): return {key: val for key in self.inputs} return dict(zip(self.inputs, val.values())) @property def uses_quantity(self): """ True if this model has been created with `~astropy.units.Quantity` objects or if there are no parameters. This can be used to determine if this model should be evaluated with `~astropy.units.Quantity` or regular floats. 
""" pisq = [isinstance(p, Quantity) for p in self._param_sets(units=True)] return (len(pisq) == 0) or any(pisq) def __repr__(self): return self._format_repr() def __str__(self): return self._format_str() def __len__(self): return self._n_models @staticmethod def _strip_ones(intup): return tuple(item for item in intup if item != 1) def __setattr__(self, attr, value): if isinstance(self, CompoundModel): param_names = self._param_names param_names = self.param_names if param_names is not None and attr in self.param_names: param = self.__dict__[attr] value = _tofloat(value) if param._validator is not None: param._validator(self, value) # check consistency with previous shape and size eshape = self._param_metrics[attr]['shape'] if eshape == (): eshape = (1,) vshape = np.array(value).shape if vshape == (): vshape = (1,) esize = self._param_metrics[attr]['size'] if (np.size(value) != esize or self._strip_ones(vshape) != self._strip_ones(eshape)): raise InputParameterError( f"Value for parameter {attr} does not match shape or size\n" f"expected by model ({vshape}, {np.size(value)}) vs ({eshape}, {esize})") if param.unit is None: if isinstance(value, Quantity): param._unit = value.unit param.value = value.value else: param.value = value else: if not isinstance(value, Quantity): raise UnitsError(f"The '{param.name}' parameter should be given as a" " Quantity because it was originally " "initialized as a Quantity") param._unit = value.unit param.value = value.value else: if attr in ['fittable', 'linear']: self.__dict__[attr] = value else: super().__setattr__(attr, value) def _pre_evaluate(self, *args, **kwargs): """ Model specific input setup that needs to occur prior to model evaluation """ # Broadcast inputs into common size inputs, broadcasted_shapes = self.prepare_inputs(*args, **kwargs) # Setup actual model evaluation method parameters = self._param_sets(raw=True, units=True) def evaluate(_inputs): return self.evaluate(*chain(_inputs, parameters)) return evaluate, inputs, broadcasted_shapes, kwargs def get_bounding_box(self, with_bbox=True): """ Return the ``bounding_box`` of a model if it exists or ``None`` otherwise. Parameters ---------- with_bbox : The value of the ``with_bounding_box`` keyword argument when calling the model. Default is `True` for usage when looking up the model's ``bounding_box`` without risk of error. """ bbox = None if not isinstance(with_bbox, bool) or with_bbox: try: bbox = self.bounding_box except NotImplementedError: pass if isinstance(bbox, CompoundBoundingBox) and not isinstance(with_bbox, bool): bbox = bbox[with_bbox] return bbox @property def _argnames(self): """The inputs used to determine input_shape for bounding_box evaluation""" return self.inputs def _validate_input_shape(self, _input, idx, argnames, model_set_axis, check_model_set_axis): """ Perform basic validation of a single model input's shape -- it has the minimum dimensions for the given model_set_axis Returns the shape of the input if validation succeeds. 
""" input_shape = np.shape(_input) # Ensure that the input's model_set_axis matches the model's # n_models if input_shape and check_model_set_axis: # Note: Scalar inputs *only* get a pass on this if len(input_shape) < model_set_axis + 1: raise ValueError( f"For model_set_axis={model_set_axis}, all inputs must be at " f"least {model_set_axis + 1}-dimensional.") if input_shape[model_set_axis] != self._n_models: try: argname = argnames[idx] except IndexError: # the case of model.inputs = () argname = str(idx) raise ValueError( f"Input argument '{argname}' does not have the correct " f"dimensions in model_set_axis={model_set_axis} for a model set with " f"n_models={self._n_models}.") return input_shape def _validate_input_shapes(self, inputs, argnames, model_set_axis): """ Perform basic validation of model inputs --that they are mutually broadcastable and that they have the minimum dimensions for the given model_set_axis. If validation succeeds, returns the total shape that will result from broadcasting the input arrays with each other. """ check_model_set_axis = self._n_models > 1 and model_set_axis is not False all_shapes = [] for idx, _input in enumerate(inputs): all_shapes.append(self._validate_input_shape(_input, idx, argnames, model_set_axis, check_model_set_axis)) input_shape = check_broadcast(*all_shapes) if input_shape is None: raise ValueError( "All inputs must have identical shapes or must be scalars.") return input_shape def input_shape(self, inputs): """Get input shape for bounding_box evaluation""" return self._validate_input_shapes(inputs, self._argnames, self.model_set_axis) def _generic_evaluate(self, evaluate, _inputs, fill_value, with_bbox): """ Generic model evaluation routine Selects and evaluates model with or without bounding_box enforcement """ # Evaluate the model using the prepared evaluation method either # enforcing the bounding_box or not. bbox = self.get_bounding_box(with_bbox) if (not isinstance(with_bbox, bool) or with_bbox) and bbox is not None: outputs = bbox.evaluate(evaluate, _inputs, fill_value) else: outputs = evaluate(_inputs) return outputs def _post_evaluate(self, inputs, outputs, broadcasted_shapes, with_bbox, **kwargs): """ Model specific post evaluation processing of outputs """ if self.get_bounding_box(with_bbox) is None and self.n_outputs == 1: outputs = (outputs,) outputs = self.prepare_outputs(broadcasted_shapes, *outputs, **kwargs) outputs = self._process_output_units(inputs, outputs) if self.n_outputs == 1: return outputs[0] return outputs @property def bbox_with_units(self): return (not isinstance(self, CompoundModel)) def __call__(self, *args, **kwargs): """ Evaluate this model using the given input(s) and the parameter values that were specified when the model was instantiated. """ # Turn any keyword arguments into positional arguments. 
args, kwargs = self._get_renamed_inputs_as_positional(*args, **kwargs) # Read model evaluation related parameters with_bbox = kwargs.pop('with_bounding_box', False) fill_value = kwargs.pop('fill_value', np.nan) # prepare for model evaluation (overridden in CompoundModel) evaluate, inputs, broadcasted_shapes, kwargs = self._pre_evaluate(*args, **kwargs) outputs = self._generic_evaluate(evaluate, inputs, fill_value, with_bbox) # post-process evaluation results (overridden in CompoundModel) return self._post_evaluate(inputs, outputs, broadcasted_shapes, with_bbox, **kwargs) def _get_renamed_inputs_as_positional(self, *args, **kwargs): def _keyword2positional(kwargs): # Inputs were passed as keyword (not positional) arguments. # Because the signature of the ``__call__`` is defined at # the class level, the name of the inputs cannot be changed at # the instance level and the old names are always present in the # signature of the method. In order to use the new names of the # inputs, the old names are taken out of ``kwargs``, the input # values are sorted in the order of self.inputs and passed as # positional arguments to ``__call__``. # These are the keys that are always present as keyword arguments. keys = ['model_set_axis', 'with_bounding_box', 'fill_value', 'equivalencies', 'inputs_map'] new_inputs = {} # kwargs contain the names of the new inputs + ``keys`` allkeys = list(kwargs.keys()) # Remove the names of the new inputs from kwargs and save them # to a dict ``new_inputs``. for key in allkeys: if key not in keys: new_inputs[key] = kwargs[key] del kwargs[key] return new_inputs, kwargs n_args = len(args) new_inputs, kwargs = _keyword2positional(kwargs) n_all_args = n_args + len(new_inputs) if n_all_args < self.n_inputs: raise ValueError(f"Missing input arguments - expected {self.n_inputs}," f" got {n_all_args}") elif n_all_args > self.n_inputs: raise ValueError(f"Too many input arguments - expected {self.n_inputs}," f" got {n_all_args}") if n_args == 0: # Create positional arguments from the keyword arguments in ``new_inputs``. new_args = [] for k in self.inputs: new_args.append(new_inputs[k]) elif n_args != self.n_inputs: # Some inputs are passed as positional, others as keyword arguments. args = list(args) # Create positional arguments from the keyword arguments in ``new_inputs``. new_args = [] for k in self.inputs: if k in new_inputs: new_args.append(new_inputs[k]) else: new_args.append(args[0]) del args[0] else: new_args = args return new_args, kwargs # *** Properties *** @property def name(self): """User-provided name for this model instance.""" return self._name @name.setter def name(self, val): """Assign a (new) name to this model.""" self._name = val @property def model_set_axis(self): """ The index of the model set axis--that is the axis of a parameter array that pertains to which model a parameter value pertains to--as specified when the model was initialized. See the documentation on :ref:`astropy:modeling-model-sets` for more details. """ return self._model_set_axis @property def param_sets(self): """ Return parameters as a pset. This is a list with one item per parameter set, which is an array of that parameter's values across all parameter sets, with the last axis associated with the parameter set. """ return self._param_sets() @property def parameters(self): """ A flattened array of all parameter values in all parameter sets. Fittable parameters maintain this list and fitters modify it. 
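
        For example (illustrative)::

            >>> from astropy.modeling import models
            >>> models.Gaussian1D(amplitude=2, mean=1, stddev=3).parameters
            array([2., 1., 3.])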
""" # Currently the sequence of a model's parameters must be contiguous # within the _parameters array (which may be a view of a larger array, # for example when taking a sub-expression of a compound model), so # the assumption here is reliable: if not self.param_names: # Trivial, but not unheard of return self._parameters self._parameters_to_array() start = self._param_metrics[self.param_names[0]]['slice'].start stop = self._param_metrics[self.param_names[-1]]['slice'].stop return self._parameters[start:stop] @parameters.setter def parameters(self, value): """ Assigning to this attribute updates the parameters array rather than replacing it. """ if not self.param_names: return start = self._param_metrics[self.param_names[0]]['slice'].start stop = self._param_metrics[self.param_names[-1]]['slice'].stop try: value = np.array(value).flatten() self._parameters[start:stop] = value except ValueError as e: raise InputParameterError( "Input parameter values not compatible with the model " f"parameters array: {e!r}") self._array_to_parameters() @property def sync_constraints(self): ''' This is a boolean property that indicates whether or not accessing constraints automatically check the constituent models current values. It defaults to True on creation of a model, but for fitting purposes it should be set to False for performance reasons. ''' if not hasattr(self, '_sync_constraints'): self._sync_constraints = True return self._sync_constraints @sync_constraints.setter def sync_constraints(self, value): if not isinstance(value, bool): raise ValueError('sync_constraints only accepts True or False as values') self._sync_constraints = value @property def fixed(self): """ A ``dict`` mapping parameter names to their fixed constraint. """ if not hasattr(self, '_fixed') or self.sync_constraints: self._fixed = _ConstraintsDict(self, 'fixed') return self._fixed @property def bounds(self): """ A ``dict`` mapping parameter names to their upper and lower bounds as ``(min, max)`` tuples or ``[min, max]`` lists. """ if not hasattr(self, '_bounds') or self.sync_constraints: self._bounds = _ConstraintsDict(self, 'bounds') return self._bounds @property def tied(self): """ A ``dict`` mapping parameter names to their tied constraint. """ if not hasattr(self, '_tied') or self.sync_constraints: self._tied = _ConstraintsDict(self, 'tied') return self._tied @property def eqcons(self): """List of parameter equality constraints.""" return self._mconstraints['eqcons'] @property def ineqcons(self): """List of parameter inequality constraints.""" return self._mconstraints['ineqcons'] def has_inverse(self): """ Returns True if the model has an analytic or user inverse defined. """ try: self.inverse except NotImplementedError: return False return True @property def inverse(self): """ Returns a new `~astropy.modeling.Model` instance which performs the inverse transform, if an analytic inverse is defined for this model. Even on models that don't have an inverse defined, this property can be set with a manually-defined inverse, such a pre-computed or experimentally determined inverse (often given as a `~astropy.modeling.polynomial.PolynomialModel`, but not by requirement). A custom inverse can be deleted with ``del model.inverse``. In this case the model's inverse is reset to its default, if a default exists (otherwise the default is to raise `NotImplementedError`). 
Note to authors of `~astropy.modeling.Model` subclasses: To define an inverse for a model simply override this property to return the appropriate model representing the inverse. The machinery that will make the inverse manually-overridable is added automatically by the base class. """ if self._user_inverse is not None: return self._user_inverse elif self._inverse is not None: result = self._inverse() if result is not NotImplemented: if not self._has_inverse_bounding_box: result.bounding_box = None return result raise NotImplementedError("No analytical or user-supplied inverse transform " "has been implemented for this model.") @inverse.setter def inverse(self, value): if not isinstance(value, (Model, type(None))): raise ValueError( "The ``inverse`` attribute may be assigned a `Model` " "instance or `None` (where `None` explicitly forces the " "model to have no inverse.") self._user_inverse = value @inverse.deleter def inverse(self): """ Resets the model's inverse to its default (if one exists, otherwise the model will have no inverse). """ try: del self._user_inverse except AttributeError: pass @property def has_user_inverse(self): """ A flag indicating whether or not a custom inverse model has been assigned to this model by a user, via assignment to ``model.inverse``. """ return self._user_inverse is not None @property def bounding_box(self): r""" A `tuple` of length `n_inputs` defining the bounding box limits, or raise `NotImplementedError` for no bounding_box. The default limits are given by a ``bounding_box`` property or method defined in the class body of a specific model. If not defined then this property just raises `NotImplementedError` by default (but may be assigned a custom value by a user). ``bounding_box`` can be set manually to an array-like object of shape ``(model.n_inputs, 2)``. For further usage, see :ref:`astropy:bounding-boxes` The limits are ordered according to the `numpy` ``'C'`` indexing convention, and are the reverse of the model input order, e.g. for inputs ``('x', 'y', 'z')``, ``bounding_box`` is defined: * for 1D: ``(x_low, x_high)`` * for 2D: ``((y_low, y_high), (x_low, x_high))`` * for 3D: ``((z_low, z_high), (y_low, y_high), (x_low, x_high))`` Examples -------- Setting the ``bounding_box`` limits for a 1D and 2D model: >>> from astropy.modeling.models import Gaussian1D, Gaussian2D >>> model_1d = Gaussian1D() >>> model_2d = Gaussian2D(x_stddev=1, y_stddev=1) >>> model_1d.bounding_box = (-5, 5) >>> model_2d.bounding_box = ((-6, 6), (-5, 5)) Setting the bounding_box limits for a user-defined 3D `custom_model`: >>> from astropy.modeling.models import custom_model >>> def const3d(x, y, z, amp=1): ... return amp ... >>> Const3D = custom_model(const3d) >>> model_3d = Const3D() >>> model_3d.bounding_box = ((-6, 6), (-5, 5), (-4, 4)) To reset ``bounding_box`` to its default limits just delete the user-defined value--this will reset it back to the default defined on the class: >>> del model_1d.bounding_box To disable the bounding box entirely (including the default), set ``bounding_box`` to `None`: >>> model_1d.bounding_box = None >>> model_1d.bounding_box # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): NotImplementedError: No bounding box is defined for this model (note: the bounding box was explicitly disabled for this model; use `del model.bounding_box` to restore the default bounding box, if one is defined for this model). 
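
        Once a bounding box is set, evaluation can enforce it; points outside
        the box are filled with ``fill_value`` (illustrative, with the
        default ``fill_value`` of NaN):

        >>> import numpy as np
        >>> model_1d.bounding_box = (-5, 5)
        >>> model_1d(np.array([-10., 0.]), with_bounding_box=True)  # doctest: +SKIP
        array([nan,  1.])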
""" if self._user_bounding_box is not None: if self._user_bounding_box is NotImplemented: raise NotImplementedError( "No bounding box is defined for this model (note: the " "bounding box was explicitly disabled for this model; " "use `del model.bounding_box` to restore the default " "bounding box, if one is defined for this model).") return self._user_bounding_box elif self._bounding_box is None: raise NotImplementedError( "No bounding box is defined for this model.") elif isinstance(self._bounding_box, ModelBoundingBox): # This typically implies a hard-coded bounding box. This will # probably be rare, but it is an option return self._bounding_box elif isinstance(self._bounding_box, types.MethodType): return ModelBoundingBox.validate(self, self._bounding_box()) else: # The only other allowed possibility is that it's a ModelBoundingBox # subclass, so we call it with its default arguments and return an # instance of it (that can be called to recompute the bounding box # with any optional parameters) # (In other words, in this case self._bounding_box is a *class*) bounding_box = self._bounding_box((), model=self)() return self._bounding_box(bounding_box, model=self) @bounding_box.setter def bounding_box(self, bounding_box): """ Assigns the bounding box limits. """ if bounding_box is None: cls = None # We use this to explicitly set an unimplemented bounding box (as # opposed to no user bounding box defined) bounding_box = NotImplemented elif (isinstance(bounding_box, CompoundBoundingBox) or isinstance(bounding_box, dict)): cls = CompoundBoundingBox elif (isinstance(self._bounding_box, type) and issubclass(self._bounding_box, ModelBoundingBox)): cls = self._bounding_box else: cls = ModelBoundingBox if cls is not None: try: bounding_box = cls.validate(self, bounding_box, _preserve_ignore=True) except ValueError as exc: raise ValueError(exc.args[0]) self._user_bounding_box = bounding_box def set_slice_args(self, *args): if isinstance(self._user_bounding_box, CompoundBoundingBox): self._user_bounding_box.slice_args = args else: raise RuntimeError('The bounding_box for this model is not compound') @bounding_box.deleter def bounding_box(self): self._user_bounding_box = None @property def has_user_bounding_box(self): """ A flag indicating whether or not a custom bounding_box has been assigned to this model by a user, via assignment to ``model.bounding_box``. """ return self._user_bounding_box is not None @property def cov_matrix(self): """ Fitter should set covariance matrix, if available. """ return self._cov_matrix @cov_matrix.setter def cov_matrix(self, cov): self._cov_matrix = cov unfix_untied_params = [p for p in self.param_names if (self.fixed[p] is False) and (self.tied[p] is False)] if type(cov) == list: # model set param_stds = [] for c in cov: param_stds.append([np.sqrt(x) if x > 0 else None for x in np.diag(c.cov_matrix)]) for p, param_name in enumerate(unfix_untied_params): par = getattr(self, param_name) par.std = [item[p] for item in param_stds] setattr(self, param_name, par) else: param_stds = [np.sqrt(x) if x > 0 else None for x in np.diag(cov.cov_matrix)] for param_name in unfix_untied_params: par = getattr(self, param_name) par.std = param_stds.pop(0) setattr(self, param_name, par) @property def stds(self): """ Standard deviation of parameters, if covariance matrix is available. 
""" return self._stds @stds.setter def stds(self, stds): self._stds = stds @property def separable(self): """ A flag indicating whether a model is separable.""" if self._separable is not None: return self._separable raise NotImplementedError( 'The "separable" property is not defined for ' f'model {self.__class__.__name__}') # *** Public methods *** def without_units_for_data(self, **kwargs): """ Return an instance of the model for which the parameter values have been converted to the right units for the data, then the units have been stripped away. The input and output Quantity objects should be given as keyword arguments. Notes ----- This method is needed in order to be able to fit models with units in the parameters, since we need to temporarily strip away the units from the model during the fitting (which might be done by e.g. scipy functions). The units that the parameters should be converted to are not necessarily the units of the input data, but are derived from them. Model subclasses that want fitting to work in the presence of quantities need to define a ``_parameter_units_for_data_units`` method that takes the input and output units (as two dictionaries) and returns a dictionary giving the target units for each parameter. """ model = self.copy() inputs_unit = {inp: getattr(kwargs[inp], 'unit', dimensionless_unscaled) for inp in self.inputs if kwargs[inp] is not None} outputs_unit = {out: getattr(kwargs[out], 'unit', dimensionless_unscaled) for out in self.outputs if kwargs[out] is not None} parameter_units = self._parameter_units_for_data_units(inputs_unit, outputs_unit) for name, unit in parameter_units.items(): parameter = getattr(model, name) if parameter.unit is not None: parameter.value = parameter.quantity.to(unit).value parameter._set_unit(None, force=True) if isinstance(model, CompoundModel): model.strip_units_from_tree() return model def output_units(self, **kwargs): """ Return a dictionary of output units for this model given a dictionary of fitting inputs and outputs The input and output Quantity objects should be given as keyword arguments. Notes ----- This method is needed in order to be able to fit models with units in the parameters, since we need to temporarily strip away the units from the model during the fitting (which might be done by e.g. scipy functions). This method will force extra model evaluations, which maybe computationally expensive. To avoid this, one can add a return_units property to the model, see :ref:`astropy:models_return_units`. """ units = self.return_units if units is None or units == {}: inputs = {inp: kwargs[inp] for inp in self.inputs} values = self(**inputs) if self.n_outputs == 1: values = (values,) units = {out: getattr(values[index], 'unit', dimensionless_unscaled) for index, out in enumerate(self.outputs)} return units def strip_units_from_tree(self): for item in self._leaflist: for parname in item.param_names: par = getattr(item, parname) par._set_unit(None, force=True) def with_units_from_data(self, **kwargs): """ Return an instance of the model which has units for which the parameter values are compatible with the data units specified. The input and output Quantity objects should be given as keyword arguments. Notes ----- This method is needed in order to be able to fit models with units in the parameters, since we need to temporarily strip away the units from the model during the fitting (which might be done by e.g. scipy functions). 
The units that the parameters will gain are not necessarily the units of the input data, but are derived from them. Model subclasses that want fitting to work in the presence of quantities need to define a ``_parameter_units_for_data_units`` method that takes the input and output units (as two dictionaries) and returns a dictionary giving the target units for each parameter. """ model = self.copy() inputs_unit = {inp: getattr(kwargs[inp], 'unit', dimensionless_unscaled) for inp in self.inputs if kwargs[inp] is not None} outputs_unit = {out: getattr(kwargs[out], 'unit', dimensionless_unscaled) for out in self.outputs if kwargs[out] is not None} parameter_units = self._parameter_units_for_data_units(inputs_unit, outputs_unit) # We are adding units to parameters that already have a value, but we # don't want to convert the parameter, just add the unit directly, # hence the call to ``_set_unit``. for name, unit in parameter_units.items(): parameter = getattr(model, name) parameter._set_unit(unit, force=True) return model @property def _has_units(self): # Returns True if any of the parameters have units for param in self.param_names: if getattr(self, param).unit is not None: return True else: return False @property def _supports_unit_fitting(self): # If the model has a ``_parameter_units_for_data_units`` method, this # indicates that we have enough information to strip the units away # and add them back after fitting, when fitting quantities return hasattr(self, '_parameter_units_for_data_units') @abc.abstractmethod def evaluate(self, *args, **kwargs): """Evaluate the model on some input variables.""" def sum_of_implicit_terms(self, *args, **kwargs): """ Evaluate the sum of any implicit model terms on some input variables. This includes any fixed terms used in evaluating a linear model that do not have corresponding parameters exposed to the user. The prototypical case is `astropy.modeling.functional_models.Shift`, which corresponds to a function y = a + bx, where b=1 is intrinsically fixed by the type of model, such that sum_of_implicit_terms(x) == x. This method is needed by linear fitters to correct the dependent variable for the implicit term(s) when solving for the remaining terms (ie. a = y - bx). """ def render(self, out=None, coords=None): """ Evaluate a model at fixed positions, respecting the ``bounding_box``. The key difference relative to evaluating the model directly is that this method is limited to a bounding box if the `Model.bounding_box` attribute is set. Parameters ---------- out : `numpy.ndarray`, optional An array that the evaluated model will be added to. If this is not given (or given as ``None``), a new array will be created. coords : array-like, optional An array to be used to translate from the model's input coordinates to the ``out`` array. It should have the property that ``self(coords)`` yields the same shape as ``out``. If ``out`` is not specified, ``coords`` will be used to determine the shape of the returned array. If this is not provided (or None), the model will be evaluated on a grid determined by `Model.bounding_box`. Returns ------- out : `numpy.ndarray` The model added to ``out`` if ``out`` is not ``None``, or else a new array from evaluating the model over ``coords``. If ``out`` and ``coords`` are both `None`, the returned array is limited to the `Model.bounding_box` limits. If `Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed. Raises ------ ValueError If ``coords`` are not given and the the `Model.bounding_box` of this model is not set. 
Examples -------- :ref:`astropy:bounding-boxes` """ try: bbox = self.bounding_box except NotImplementedError: bbox = None if isinstance(bbox, ModelBoundingBox): bbox = bbox.bounding_box() ndim = self.n_inputs if (coords is None) and (out is None) and (bbox is None): raise ValueError('If no bounding_box is set, ' 'coords or out must be input.') # for consistent indexing if ndim == 1: if coords is not None: coords = [coords] if bbox is not None: bbox = [bbox] if coords is not None: coords = np.asanyarray(coords, dtype=float) # Check dimensions match out and model assert len(coords) == ndim if out is not None: if coords[0].shape != out.shape: raise ValueError('inconsistent shape of the output.') else: out = np.zeros(coords[0].shape) if out is not None: out = np.asanyarray(out) if out.ndim != ndim: raise ValueError('the array and model must have the same ' 'number of dimensions.') if bbox is not None: # Assures position is at center pixel, # important when using add_array. pd = np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2)) for bb in bbox]).astype(int).T pos, delta = pd if coords is not None: sub_shape = tuple(delta * 2 + 1) sub_coords = np.array([extract_array(c, sub_shape, pos) for c in coords]) else: limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T] sub_coords = np.mgrid[limits] sub_coords = sub_coords[::-1] if out is None: out = self(*sub_coords) else: try: out = add_array(out, self(*sub_coords), pos) except ValueError: raise ValueError( 'The `bounding_box` is larger than the input out in ' 'one or more dimensions. Set ' '`model.bounding_box = None`.') else: if coords is None: im_shape = out.shape limits = [slice(i) for i in im_shape] coords = np.mgrid[limits] coords = coords[::-1] out += self(*coords) return out @property def input_units(self): """ This property is used to indicate what units or sets of units the evaluate method expects, and returns a dictionary mapping inputs to units (or `None` if any units are accepted). Model sub-classes can also use function annotations in evaluate to indicate valid input units, in which case this property should not be overridden since it will return the input units based on the annotations. """ if hasattr(self, '_input_units'): return self._input_units elif hasattr(self.evaluate, '__annotations__'): annotations = self.evaluate.__annotations__.copy() annotations.pop('return', None) if annotations: # If there are not annotations for all inputs this will error. return dict((name, annotations[name]) for name in self.inputs) else: # None means any unit is accepted return None @property def return_units(self): """ This property is used to indicate what units or sets of units the output of evaluate should be in, and returns a dictionary mapping outputs to units (or `None` if any units are accepted). Model sub-classes can also use function annotations in evaluate to indicate valid output units, in which case this property should not be overridden since it will return the return units based on the annotations. """ if hasattr(self, '_return_units'): return self._return_units elif hasattr(self.evaluate, '__annotations__'): return self.evaluate.__annotations__.get('return', None) else: # None means any unit is accepted return None def _prepare_inputs_single_model(self, params, inputs, **kwargs): broadcasts = [] for idx, _input in enumerate(inputs): input_shape = _input.shape # Ensure that array scalars are always upgrade to 1-D arrays for the # sake of consistency with how parameters work. 
            # They will be cast back to scalars at the end
            if not input_shape:
                inputs[idx] = _input.reshape((1,))

            if not params:
                max_broadcast = input_shape
            else:
                max_broadcast = ()

            for param in params:
                try:
                    if self.standard_broadcasting:
                        broadcast = check_broadcast(input_shape, param.shape)
                    else:
                        broadcast = input_shape
                except IncompatibleShapeError:
                    raise ValueError(
                        f"Model input argument {self.inputs[idx]!r} of shape "
                        f"{input_shape!r} cannot be broadcast with parameter "
                        f"{param.name!r} of shape {param.shape!r}.")

                if len(broadcast) > len(max_broadcast):
                    max_broadcast = broadcast
                elif len(broadcast) == len(max_broadcast):
                    max_broadcast = max(max_broadcast, broadcast)

            broadcasts.append(max_broadcast)

        if self.n_outputs > self.n_inputs:
            extra_outputs = self.n_outputs - self.n_inputs
            if not broadcasts:
                # If there were no inputs then the broadcasts list is empty;
                # just add a None since there is no broadcasting of outputs
                # and inputs necessary (see _prepare_outputs_single_model)
                broadcasts.append(None)
            broadcasts.extend([broadcasts[0]] * extra_outputs)

        return inputs, (broadcasts,)

    @staticmethod
    def _remove_axes_from_shape(shape, axis):
        """
        Given a shape tuple as the first input, construct a new one by
        removing that particular axis from the shape and all preceding axes.
        Negative axis numbers are permitted, where the axis is relative to
        the last axis.
        """
        if len(shape) == 0:
            return shape

        if axis < 0:
            axis = len(shape) + axis
            return shape[:axis] + shape[axis+1:]

        if axis >= len(shape):
            axis = len(shape) - 1
        shape = shape[axis+1:]

        return shape

    def _prepare_inputs_model_set(self, params, inputs, model_set_axis_input,
                                  **kwargs):
        reshaped = []
        pivots = []

        model_set_axis_param = self.model_set_axis  # needed to reshape param

        for idx, _input in enumerate(inputs):
            max_param_shape = ()
            if self._n_models > 1 and model_set_axis_input is not False:
                # Use the shape of the input, excluding the model axis
                input_shape = (_input.shape[:model_set_axis_input]
                               + _input.shape[model_set_axis_input + 1:])
            else:
                input_shape = _input.shape

            for param in params:
                try:
                    check_broadcast(
                        input_shape,
                        self._remove_axes_from_shape(param.shape,
                                                     model_set_axis_param))
                except IncompatibleShapeError:
                    raise ValueError(
                        f"Model input argument {self.inputs[idx]!r} of shape "
                        f"{input_shape!r} cannot be broadcast with parameter "
                        f"{param.name!r} of shape "
                        f"{self._remove_axes_from_shape(param.shape, model_set_axis_param)!r}.")

                if len(param.shape) - 1 > len(max_param_shape):
                    max_param_shape = self._remove_axes_from_shape(
                        param.shape, model_set_axis_param)

            # We've now determined that, excluding the model_set_axis, the
            # input can broadcast with all the parameters
            input_ndim = len(input_shape)
            if model_set_axis_input is False:
                if len(max_param_shape) > input_ndim:
                    # Just needs to prepend new axes to the input
                    n_new_axes = 1 + len(max_param_shape) - input_ndim
                    new_axes = (1,) * n_new_axes
                    new_shape = new_axes + _input.shape
                    pivot = model_set_axis_param
                else:
                    pivot = input_ndim - len(max_param_shape)
                    new_shape = (_input.shape[:pivot] + (1,)
                                 + _input.shape[pivot:])
                new_input = _input.reshape(new_shape)
            else:
                if len(max_param_shape) >= input_ndim:
                    n_new_axes = len(max_param_shape) - input_ndim
                    pivot = self.model_set_axis
                    new_axes = (1,) * n_new_axes
                    new_shape = (_input.shape[:pivot + 1] + new_axes
                                 + _input.shape[pivot + 1:])
                    new_input = _input.reshape(new_shape)
                else:
                    pivot = _input.ndim - len(max_param_shape) - 1
                    new_input = np.rollaxis(_input, model_set_axis_input,
                                            pivot + 1)
            pivots.append(pivot)
            reshaped.append(new_input)

        if self.n_inputs <
self.n_outputs: pivots.extend([model_set_axis_input] * (self.n_outputs - self.n_inputs)) return reshaped, (pivots,) def prepare_inputs(self, *inputs, model_set_axis=None, equivalencies=None, **kwargs): """ This method is used in `~astropy.modeling.Model.__call__` to ensure that all the inputs to the model can be broadcast into compatible shapes (if one or both of them are input as arrays), particularly if there are more than one parameter sets. This also makes sure that (if applicable) the units of the input will be compatible with the evaluate method. """ # When we instantiate the model class, we make sure that __call__ can # take the following two keyword arguments: model_set_axis and # equivalencies. if model_set_axis is None: # By default the model_set_axis for the input is assumed to be the # same as that for the parameters the model was defined with # TODO: Ensure that negative model_set_axis arguments are respected model_set_axis = self.model_set_axis params = [getattr(self, name) for name in self.param_names] inputs = [np.asanyarray(_input, dtype=float) for _input in inputs] self._validate_input_shapes(inputs, self.inputs, model_set_axis) inputs_map = kwargs.get('inputs_map', None) inputs = self._validate_input_units(inputs, equivalencies, inputs_map) # The input formatting required for single models versus a multiple # model set are different enough that they've been split into separate # subroutines if self._n_models == 1: return self._prepare_inputs_single_model(params, inputs, **kwargs) else: return self._prepare_inputs_model_set(params, inputs, model_set_axis, **kwargs) def _validate_input_units(self, inputs, equivalencies=None, inputs_map=None): inputs = list(inputs) name = self.name or self.__class__.__name__ # Check that the units are correct, if applicable if self.input_units is not None: # If a leaflist is provided that means this is in the context of # a compound model and it is necessary to create the appropriate # alias for the input coordinate name for the equivalencies dict if inputs_map: edict = {} for mod, mapping in inputs_map: if self is mod: edict[mapping[0]] = equivalencies[mapping[1]] else: edict = equivalencies # We combine any instance-level input equivalencies with user # specified ones at call-time. input_units_equivalencies = _combine_equivalency_dict(self.inputs, edict, self.input_units_equivalencies) # We now iterate over the different inputs and make sure that their # units are consistent with those specified in input_units. for i in range(len(inputs)): input_name = self.inputs[i] input_unit = self.input_units.get(input_name, None) if input_unit is None: continue if isinstance(inputs[i], Quantity): # We check for consistency of the units with input_units, # taking into account any equivalencies if inputs[i].unit.is_equivalent( input_unit, equivalencies=input_units_equivalencies[input_name]): # If equivalencies have been specified, we need to # convert the input to the input units - this is # because some equivalencies are non-linear, and # we need to be sure that we evaluate the model in # its own frame of reference. If input_units_strict # is set, we also need to convert to the input units. 
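                    # For illustration (hypothetical input name 'x'): with
                    # ``u.spectral()`` equivalencies, a model whose
                    # ``input_units`` require Hz can accept a wavelength
                    # Quantity, e.g.::
                    #
                    #     model(500 * u.nm,
                    #           equivalencies={'x': u.spectral()})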
if (len(input_units_equivalencies) > 0 or self.input_units_strict[input_name]): inputs[i] = inputs[i].to(input_unit, equivalencies=input_units_equivalencies[ input_name]) else: # We consider the following two cases separately so as # to be able to raise more appropriate/nicer exceptions if input_unit is dimensionless_unscaled: raise UnitsError(f"{name}: Units of input '{self.inputs[i]}', " f"{inputs[i].unit} ({inputs[i].unit.physical_type})," "could not be converted to " "required dimensionless " "input") else: raise UnitsError(f"{name}: Units of input '{self.inputs[i]}', " f"{inputs[i].unit} ({inputs[i].unit.physical_type})," " could not be " "converted to required input" f" units of {input_unit} ({input_unit.physical_type})") else: # If we allow dimensionless input, we add the units to the # input values without conversion, otherwise we raise an # exception. if (not self.input_units_allow_dimensionless[input_name] and input_unit is not dimensionless_unscaled and input_unit is not None): if np.any(inputs[i] != 0): raise UnitsError(f"{name}: Units of input '{self.inputs[i]}'," " (dimensionless), could not be converted to required " f"input units of {input_unit} " f"({input_unit.physical_type})") return inputs def _process_output_units(self, inputs, outputs): inputs_are_quantity = any([isinstance(i, Quantity) for i in inputs]) if self.return_units and inputs_are_quantity: # We allow a non-iterable unit only if there is one output if self.n_outputs == 1 and not isiterable(self.return_units): return_units = {self.outputs[0]: self.return_units} else: return_units = self.return_units outputs = tuple([Quantity(out, return_units.get(out_name, None), subok=True) for out, out_name in zip(outputs, self.outputs)]) return outputs @staticmethod def _prepare_output_single_model(output, broadcast_shape): if broadcast_shape is not None: if not broadcast_shape: return output.item() else: try: return output.reshape(broadcast_shape) except ValueError: try: return output.item() except ValueError: return output return output def _prepare_outputs_single_model(self, outputs, broadcasted_shapes): outputs = list(outputs) for idx, output in enumerate(outputs): try: broadcast_shape = check_broadcast(*broadcasted_shapes[0]) except (IndexError, TypeError): broadcast_shape = broadcasted_shapes[0][idx] outputs[idx] = self._prepare_output_single_model(output, broadcast_shape) return tuple(outputs) def _prepare_outputs_model_set(self, outputs, broadcasted_shapes, model_set_axis): pivots = broadcasted_shapes[0] # If model_set_axis = False was passed then use # self._model_set_axis to format the output. if model_set_axis is None or model_set_axis is False: model_set_axis = self.model_set_axis outputs = list(outputs) for idx, output in enumerate(outputs): pivot = pivots[idx] if pivot < output.ndim and pivot != model_set_axis: outputs[idx] = np.rollaxis(output, pivot, model_set_axis) return tuple(outputs) def prepare_outputs(self, broadcasted_shapes, *outputs, **kwargs): model_set_axis = kwargs.get('model_set_axis', None) if len(self) == 1: return self._prepare_outputs_single_model(outputs, broadcasted_shapes) else: return self._prepare_outputs_model_set(outputs, broadcasted_shapes, model_set_axis) def copy(self): """ Return a copy of this model. Uses a deep copy so that all model attributes, including parameter values, are copied as well. """ return copy.deepcopy(self) def deepcopy(self): """ Return a deep copy of this model. 
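
        For example (illustrative), mutating the copy leaves the original
        untouched:

        >>> from astropy.modeling.models import Gaussian1D
        >>> g = Gaussian1D(2, 1, 0.5)
        >>> g2 = g.deepcopy()
        >>> g2.amplitude = 3
        >>> g.amplitude.value  # doctest: +SKIP
        2.0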
""" return self.copy() @sharedmethod def rename(self, name): """ Return a copy of this model with a new name. """ new_model = self.copy() new_model._name = name return new_model def coerce_units( self, input_units=None, return_units=None, input_units_equivalencies=None, input_units_allow_dimensionless=False ): """ Attach units to this (unitless) model. Parameters ---------- input_units : dict or tuple, optional Input units to attach. If dict, each key is the name of a model input, and the value is the unit to attach. If tuple, the elements are units to attach in order corresponding to `Model.inputs`. return_units : dict or tuple, optional Output units to attach. If dict, each key is the name of a model output, and the value is the unit to attach. If tuple, the elements are units to attach in order corresponding to `Model.outputs`. input_units_equivalencies : dict, optional Default equivalencies to apply to input values. If set, this should be a dictionary where each key is a string that corresponds to one of the model inputs. input_units_allow_dimensionless : bool or dict, optional Allow dimensionless input. If this is True, input values to evaluate will gain the units specified in input_units. If this is a dictionary then it should map input name to a bool to allow dimensionless numbers for that input. Returns ------- `CompoundModel` A `CompoundModel` composed of the current model plus `~astropy.modeling.mappings.UnitsMapping` model(s) that attach the units. Raises ------ ValueError If the current model already has units. Examples -------- Wrapping a unitless model to require and convert units: >>> from astropy.modeling.models import Polynomial1D >>> from astropy import units as u >>> poly = Polynomial1D(1, c0=1, c1=2) >>> model = poly.coerce_units((u.m,), (u.s,)) >>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP <Quantity 21. s> >>> model(u.Quantity(1000, u.cm)) # doctest: +FLOAT_CMP <Quantity 21. s> >>> model(u.Quantity(10, u.cm)) # doctest: +FLOAT_CMP <Quantity 1.2 s> Wrapping a unitless model but still permitting unitless input: >>> from astropy.modeling.models import Polynomial1D >>> from astropy import units as u >>> poly = Polynomial1D(1, c0=1, c1=2) >>> model = poly.coerce_units((u.m,), (u.s,), input_units_allow_dimensionless=True) >>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP <Quantity 21. s> >>> model(10) # doctest: +FLOAT_CMP <Quantity 21. 
s> """ from .mappings import UnitsMapping result = self if input_units is not None: if self.input_units is not None: model_units = self.input_units else: model_units = {} for unit in [model_units.get(i) for i in self.inputs]: if unit is not None and unit != dimensionless_unscaled: raise ValueError("Cannot specify input_units for model with " "existing input units") if isinstance(input_units, dict): if input_units.keys() != set(self.inputs): message = ( f"""input_units keys ({", ".join(input_units.keys())}) """ f"""do not match model inputs ({", ".join(self.inputs)})""" ) raise ValueError(message) input_units = [input_units[i] for i in self.inputs] if len(input_units) != self.n_inputs: message = ( "input_units length does not match n_inputs: " f"expected {self.n_inputs}, received {len(input_units)}" ) raise ValueError(message) mapping = tuple((unit, model_units.get(i)) for i, unit in zip(self.inputs, input_units)) input_mapping = UnitsMapping( mapping, input_units_equivalencies=input_units_equivalencies, input_units_allow_dimensionless=input_units_allow_dimensionless ) input_mapping.inputs = self.inputs input_mapping.outputs = self.inputs result = input_mapping | result if return_units is not None: if self.return_units is not None: model_units = self.return_units else: model_units = {} for unit in [model_units.get(i) for i in self.outputs]: if unit is not None and unit != dimensionless_unscaled: raise ValueError("Cannot specify return_units for model " "with existing output units") if isinstance(return_units, dict): if return_units.keys() != set(self.outputs): message = ( f"""return_units keys ({", ".join(return_units.keys())}) """ f"""do not match model outputs ({", ".join(self.outputs)})""" ) raise ValueError(message) return_units = [return_units[i] for i in self.outputs] if len(return_units) != self.n_outputs: message = ( "return_units length does not match n_outputs: " f"expected {self.n_outputs}, received {len(return_units)}" ) raise ValueError(message) mapping = tuple((model_units.get(i), unit) for i, unit in zip(self.outputs, return_units)) return_mapping = UnitsMapping(mapping) return_mapping.inputs = self.outputs return_mapping.outputs = self.outputs result = result | return_mapping return result @property def n_submodels(self): """ Return the number of components in a single model, which is obviously 1. """ return 1 def _initialize_constraints(self, kwargs): """ Pop parameter constraint values off the keyword arguments passed to `Model.__init__` and store them in private instance attributes. """ # Pop any constraints off the keyword arguments for constraint in self.parameter_constraints: values = kwargs.pop(constraint, {}) for ckey, cvalue in values.items(): param = getattr(self, ckey) setattr(param, constraint, cvalue) self._mconstraints = {} for constraint in self.model_constraints: values = kwargs.pop(constraint, []) self._mconstraints[constraint] = values def _initialize_parameters(self, args, kwargs): """ Initialize the _parameters array that stores raw parameter values for all parameter sets for use with vectorized fitting algorithms; on FittableModels the _param_name attributes actually just reference slices of this array. 
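
        For example (illustrative), a 2-model set shares a single flat
        parameters array:

        >>> from astropy.modeling.models import Gaussian1D
        >>> g = Gaussian1D([1, 2], [0, 0], [1, 1], n_models=2)
        >>> len(g)  # doctest: +SKIP
        2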
""" n_models = kwargs.pop('n_models', None) if not (n_models is None or (isinstance(n_models, (int, np.integer)) and n_models >= 1)): raise ValueError( "n_models must be either None (in which case it is " "determined from the model_set_axis of the parameter initial " "values) or it must be a positive integer " f"(got {n_models!r})") model_set_axis = kwargs.pop('model_set_axis', None) if model_set_axis is None: if n_models is not None and n_models > 1: # Default to zero model_set_axis = 0 else: # Otherwise disable model_set_axis = False else: if not (model_set_axis is False or np.issubdtype(type(model_set_axis), np.integer)): raise ValueError( "model_set_axis must be either False or an integer " "specifying the parameter array axis to map to each " f"model in a set of models (got {model_set_axis!r}).") # Process positional arguments by matching them up with the # corresponding parameters in self.param_names--if any also appear as # keyword arguments this presents a conflict params = set() if len(args) > len(self.param_names): raise TypeError( f"{self.__class__.__name__}.__init__() takes at most " f"{len(self.param_names)} positional arguments ({len(args)} given)") self._model_set_axis = model_set_axis self._param_metrics = defaultdict(dict) for idx, arg in enumerate(args): if arg is None: # A value of None implies using the default value, if exists continue # We use quantity_asanyarray here instead of np.asanyarray because # if any of the arguments are quantities, we need to return a # Quantity object not a plain Numpy array. param_name = self.param_names[idx] params.add(param_name) if not isinstance(arg, Parameter): value = quantity_asanyarray(arg, dtype=float) else: value = arg self._initialize_parameter_value(param_name, value) # At this point the only remaining keyword arguments should be # parameter names; any others are in error. for param_name in self.param_names: if param_name in kwargs: if param_name in params: raise TypeError( f"{self.__class__.__name__}.__init__() got multiple values for parameter " f"{param_name!r}") value = kwargs.pop(param_name) if value is None: continue # We use quantity_asanyarray here instead of np.asanyarray # because if any of the arguments are quantities, we need # to return a Quantity object not a plain Numpy array. 
value = quantity_asanyarray(value, dtype=float) params.add(param_name) self._initialize_parameter_value(param_name, value) # Now deal with case where param_name is not supplied by args or kwargs for param_name in self.param_names: if param_name not in params: self._initialize_parameter_value(param_name, None) if kwargs: # If any keyword arguments were left over at this point they are # invalid--the base class should only be passed the parameter # values, constraints, and param_dim for kwarg in kwargs: # Just raise an error on the first unrecognized argument raise TypeError( f"{self.__class__.__name__}.__init__() got an unrecognized parameter " f"{kwarg!r}") # Determine the number of model sets: If the model_set_axis is # None then there is just one parameter set; otherwise it is determined # by the size of that axis on the first parameter--if the other # parameters don't have the right number of axes or the sizes of their # model_set_axis don't match an error is raised if model_set_axis is not False and n_models != 1 and params: max_ndim = 0 if model_set_axis < 0: min_ndim = abs(model_set_axis) else: min_ndim = model_set_axis + 1 for name in self.param_names: value = getattr(self, name) param_ndim = np.ndim(value) if param_ndim < min_ndim: raise InputParameterError( "All parameter values must be arrays of dimension " f"at least {min_ndim} for model_set_axis={model_set_axis} (the value " f"given for {name!r} is only {param_ndim}-dimensional)") max_ndim = max(max_ndim, param_ndim) if n_models is None: # Use the dimensions of the first parameter to determine # the number of model sets n_models = value.shape[model_set_axis] elif value.shape[model_set_axis] != n_models: raise InputParameterError( f"Inconsistent dimensions for parameter {name!r} for " f"{n_models} model sets. 
The length of axis {model_set_axis} must be the " "same for all input parameter values") self._check_param_broadcast(max_ndim) else: if n_models is None: n_models = 1 self._check_param_broadcast(None) self._n_models = n_models # now validate parameters for name in params: param = getattr(self, name) if param._validator is not None: param._validator(self, param.value) def _initialize_parameter_value(self, param_name, value): """Mostly deals with consistency checks and determining unit issues.""" if isinstance(value, Parameter): self.__dict__[param_name] = value return param = getattr(self, param_name) # Use default if value is not provided if value is None: default = param.default if default is None: # No value was supplied for the parameter and the # parameter does not have a default, therefore the model # is underspecified raise TypeError(f"{self.__class__.__name__}.__init__() requires a value for " f"parameter {param_name!r}") value = default unit = param.unit else: if isinstance(value, Quantity): unit = value.unit value = value.value else: unit = None if unit is None and param.unit is not None: raise InputParameterError( f"{self.__class__.__name__}.__init__() requires a Quantity for parameter " f"{param_name!r}") param._unit = unit param.internal_unit = None if param._setter is not None: if unit is not None: _val = param._setter(value * unit) else: _val = param._setter(value) if isinstance(_val, Quantity): param.internal_unit = _val.unit param._internal_value = np.array(_val.value) else: param.internal_unit = None param._internal_value = np.array(_val) else: param._value = np.array(value) def _initialize_slices(self): param_metrics = self._param_metrics total_size = 0 for name in self.param_names: param = getattr(self, name) value = param.value param_size = np.size(value) param_shape = np.shape(value) param_slice = slice(total_size, total_size + param_size) param_metrics[name]['slice'] = param_slice param_metrics[name]['shape'] = param_shape param_metrics[name]['size'] = param_size total_size += param_size self._parameters = np.empty(total_size, dtype=np.float64) def _parameters_to_array(self): # Now set the parameter values (this will also fill # self._parameters) param_metrics = self._param_metrics for name in self.param_names: param = getattr(self, name) value = param.value if not isinstance(value, np.ndarray): value = np.array([value]) self._parameters[param_metrics[name]['slice']] = value.ravel() # Finally validate all the parameters; we do this last so that # validators that depend on one of the other parameters' values will # work def _array_to_parameters(self): param_metrics = self._param_metrics for name in self.param_names: param = getattr(self, name) value = self._parameters[param_metrics[name]['slice']] value.shape = param_metrics[name]['shape'] param.value = value def _check_param_broadcast(self, max_ndim): """ This subroutine checks that all parameter arrays can be broadcast against each other, and determines the shapes parameters must have in order to broadcast correctly. If model_set_axis is None this merely checks that the parameters broadcast and returns an empty dict if so. This mode is only used for single model sets. 
""" all_shapes = [] model_set_axis = self._model_set_axis for name in self.param_names: param = getattr(self, name) value = param.value param_shape = np.shape(value) param_ndim = len(param_shape) if max_ndim is not None and param_ndim < max_ndim: # All arrays have the same number of dimensions up to the # model_set_axis dimension, but after that they may have a # different number of trailing axes. The number of trailing # axes must be extended for mutual compatibility. For example # if max_ndim = 3 and model_set_axis = 0, an array with the # shape (2, 2) must be extended to (2, 1, 2). However, an # array with shape (2,) is extended to (2, 1). new_axes = (1,) * (max_ndim - param_ndim) if model_set_axis < 0: # Just need to prepend axes to make up the difference broadcast_shape = new_axes + param_shape else: broadcast_shape = (param_shape[:model_set_axis + 1] + new_axes + param_shape[model_set_axis + 1:]) self._param_metrics[name]['broadcast_shape'] = broadcast_shape all_shapes.append(broadcast_shape) else: all_shapes.append(param_shape) # Now check mutual broadcastability of all shapes try: check_broadcast(*all_shapes) except IncompatibleShapeError as exc: shape_a, shape_a_idx, shape_b, shape_b_idx = exc.args param_a = self.param_names[shape_a_idx] param_b = self.param_names[shape_b_idx] raise InputParameterError( f"Parameter {param_a!r} of shape {shape_a!r} cannot be broadcast with " f"parameter {param_b!r} of shape {shape_b!r}. All parameter arrays " "must have shapes that are mutually compatible according " "to the broadcasting rules.") def _param_sets(self, raw=False, units=False): """ Implementation of the Model.param_sets property. This internal implementation has a ``raw`` argument which controls whether or not to return the raw parameter values (i.e. the values that are actually stored in the ._parameters array, as opposed to the values displayed to users. In most cases these are one in the same but there are currently a few exceptions. Note: This is notably an overcomplicated device and may be removed entirely in the near future. """ values = [] shapes = [] for name in self.param_names: param = getattr(self, name) if raw and param._setter: value = param._internal_value else: value = param.value broadcast_shape = self._param_metrics[name].get('broadcast_shape') if broadcast_shape is not None: value = value.reshape(broadcast_shape) shapes.append(np.shape(value)) if len(self) == 1: # Add a single param set axis to the parameter's value (thus # converting scalars to shape (1,) array values) for # consistency value = np.array([value]) if units: if raw and param.internal_unit is not None: unit = param.internal_unit else: unit = param.unit if unit is not None: value = Quantity(value, unit) values.append(value) if len(set(shapes)) != 1 or units: # If the parameters are not all the same shape, converting to an # array is going to produce an object array # However the way Numpy creates object arrays is tricky in that it # will recurse into array objects in the list and break them up # into separate objects. Doing things this way ensures a 1-D # object array the elements of which are the individual parameter # arrays. There's not much reason to do this over returning a list # except for consistency psets = np.empty(len(values), dtype=object) psets[:] = values return psets return np.array(values) def _format_repr(self, args=[], kwargs={}, defaults={}): """ Internal implementation of ``__repr__``. 
This is separated out for ease of use by subclasses that wish to override the default ``__repr__`` while keeping the same basic formatting. """ parts = [repr(a) for a in args] parts.extend( f"{name}={param_repr_oneline(getattr(self, name))}" for name in self.param_names) if self.name is not None: parts.append(f'name={self.name!r}') for kwarg, value in kwargs.items(): if kwarg in defaults and defaults[kwarg] == value: continue parts.append(f'{kwarg}={value!r}') if len(self) > 1: parts.append(f"n_models={len(self)}") return f"<{self.__class__.__name__}({', '.join(parts)})>" def _format_str(self, keywords=[], defaults={}): """ Internal implementation of ``__str__``. This is separated out for ease of use by subclasses that wish to override the default ``__str__`` while keeping the same basic formatting. """ default_keywords = [ ('Model', self.__class__.__name__), ('Name', self.name), ('Inputs', self.inputs), ('Outputs', self.outputs), ('Model set size', len(self)) ] parts = [f'{keyword}: {value}' for keyword, value in default_keywords if value is not None] for keyword, value in keywords: if keyword.lower() in defaults and defaults[keyword.lower()] == value: continue parts.append(f'{keyword}: {value}') parts.append('Parameters:') if len(self) == 1: columns = [[getattr(self, name).value] for name in self.param_names] else: columns = [getattr(self, name).value for name in self.param_names] if columns: param_table = Table(columns, names=self.param_names) # Set units on the columns for name in self.param_names: param_table[name].unit = getattr(self, name).unit parts.append(indent(str(param_table), width=4)) return '\n'.join(parts) class FittableModel(Model): """ Base class for models that can be fitted using the built-in fitting algorithms. """ linear = False # derivative with respect to parameters fit_deriv = None """ Function (similar to the model's `~Model.evaluate`) to compute the derivatives of the model with respect to its parameters, for use by fitting algorithms. In other words, this computes the Jacobian matrix with respect to the model's parameters. """ # Flag that indicates if the model derivatives with respect to parameters # are given in columns or rows col_fit_deriv = True fittable = True class Fittable1DModel(FittableModel): """ Base class for one-dimensional fittable models. This class provides an easier interface to defining new models. Examples can be found in `astropy.modeling.functional_models`. """ n_inputs = 1 n_outputs = 1 _separable = True class Fittable2DModel(FittableModel): """ Base class for two-dimensional fittable models. This class provides an easier interface to defining new models. Examples can be found in `astropy.modeling.functional_models`. 
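
    A minimal (hypothetical) subclass only needs the parameter declarations
    and an ``evaluate`` method:

    >>> from astropy.modeling import Fittable2DModel, Parameter
    >>> class Plane2D(Fittable2DModel):
    ...     slope_x = Parameter(default=0)
    ...     slope_y = Parameter(default=0)
    ...
    ...     @staticmethod
    ...     def evaluate(x, y, slope_x, slope_y):
    ...         return slope_x * x + slope_y * y
    >>> Plane2D(2, 3)(1, 1)  # doctest: +SKIP
    5.0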
""" n_inputs = 2 n_outputs = 1 def _make_arithmetic_operator(oper): # We don't bother with tuple unpacking here for efficiency's sake, but for # documentation purposes: # # f_eval, f_n_inputs, f_n_outputs = f # # and similarly for g def op(f, g): return (make_binary_operator_eval(oper, f[0], g[0]), f[1], f[2]) return op def _composition_operator(f, g): # We don't bother with tuple unpacking here for efficiency's sake, but for # documentation purposes: # # f_eval, f_n_inputs, f_n_outputs = f # # and similarly for g return (lambda inputs, params: g[0](f[0](inputs, params), params), f[1], g[2]) def _join_operator(f, g): # We don't bother with tuple unpacking here for efficiency's sake, but for # documentation purposes: # # f_eval, f_n_inputs, f_n_outputs = f # # and similarly for g return (lambda inputs, params: (f[0](inputs[:f[1]], params) + g[0](inputs[f[1]:], params)), f[1] + g[1], f[2] + g[2]) BINARY_OPERATORS = { '+': _make_arithmetic_operator(operator.add), '-': _make_arithmetic_operator(operator.sub), '*': _make_arithmetic_operator(operator.mul), '/': _make_arithmetic_operator(operator.truediv), '**': _make_arithmetic_operator(operator.pow), '|': _composition_operator, '&': _join_operator } SPECIAL_OPERATORS = _SpecialOperatorsDict() def _add_special_operator(sop_name, sop): return SPECIAL_OPERATORS.add(sop_name, sop) class CompoundModel(Model): ''' Base class for compound models. While it can be used directly, the recommended way to combine models is through the model operators. ''' def __init__(self, op, left, right, name=None): self.__dict__['_param_names'] = None self._n_submodels = None self.op = op self.left = left self.right = right self._bounding_box = None self._user_bounding_box = None self._leaflist = None self._tdict = None self._parameters = None self._parameters_ = None self._param_metrics = None if op != 'fix_inputs' and len(left) != len(right): raise ValueError( 'Both operands must have equal values for n_models') self._n_models = len(left) if op != 'fix_inputs' and ((left.model_set_axis != right.model_set_axis) or left.model_set_axis): # not False and not 0 raise ValueError("model_set_axis must be False or 0 and consistent for operands") self._model_set_axis = left.model_set_axis if op in ['+', '-', '*', '/', '**'] or op in SPECIAL_OPERATORS: if (left.n_inputs != right.n_inputs or left.n_outputs != right.n_outputs): raise ModelDefinitionError( 'Both operands must match numbers of inputs and outputs') self.n_inputs = left.n_inputs self.n_outputs = left.n_outputs self.inputs = left.inputs self.outputs = left.outputs elif op == '&': self.n_inputs = left.n_inputs + right.n_inputs self.n_outputs = left.n_outputs + right.n_outputs self.inputs = combine_labels(left.inputs, right.inputs) self.outputs = combine_labels(left.outputs, right.outputs) elif op == '|': if left.n_outputs != right.n_inputs: raise ModelDefinitionError( f"Unsupported operands for |: {left.name} (n_inputs={left.n_inputs}, " f"n_outputs={left.n_outputs}) and {right.name} " f"(n_inputs={right.n_inputs}, n_outputs={right.n_outputs}); " "n_outputs for the left-hand model must match n_inputs " "for the right-hand model.") self.n_inputs = left.n_inputs self.n_outputs = right.n_outputs self.inputs = left.inputs self.outputs = right.outputs elif op == 'fix_inputs': if not isinstance(left, Model): raise ValueError('First argument to "fix_inputs" must be an instance of ' 'an astropy Model.') if not isinstance(right, dict): raise ValueError('Expected a dictionary for second argument of "fix_inputs".') # Dict keys 
must match either possible indices # for model on left side, or names for inputs. self.n_inputs = left.n_inputs - len(right) # Assign directly to the private attribute (instead of using the setter) # to avoid asserting the new number of outputs matches the old one. self._outputs = left.outputs self.n_outputs = left.n_outputs newinputs = list(left.inputs) keys = right.keys() input_ind = [] for key in keys: if np.issubdtype(type(key), np.integer): if key >= left.n_inputs or key < 0: raise ValueError( 'Substitution key integer value ' 'not among possible input choices.') if key in input_ind: raise ValueError("Duplicate specification of " "same input (index/name).") input_ind.append(key) elif isinstance(key, str): if key not in left.inputs: raise ValueError( 'Substitution key string not among possible ' 'input choices.') # Check to see it doesn't match positional # specification. ind = left.inputs.index(key) if ind in input_ind: raise ValueError("Duplicate specification of " "same input (index/name).") input_ind.append(ind) # Remove substituted inputs input_ind.sort() input_ind.reverse() for ind in input_ind: del newinputs[ind] self.inputs = tuple(newinputs) # Now check to see if the input model has bounding_box defined. # If so, remove the appropriate dimensions and set it for this # instance. try: self.bounding_box = self.left.bounding_box.fix_inputs(self, right) except NotImplementedError: pass else: raise ModelDefinitionError('Illegal operator: ', self.op) self.name = name self._fittable = None self.fit_deriv = None self.col_fit_deriv = None if op in ('|', '+', '-'): self.linear = left.linear and right.linear else: self.linear = False self.eqcons = [] self.ineqcons = [] self.n_left_params = len(self.left.parameters) self._map_parameters() def _get_left_inputs_from_args(self, args): return args[:self.left.n_inputs] def _get_right_inputs_from_args(self, args): op = self.op if op == '&': # Args expected to look like (*left inputs, *right inputs, *left params, *right params) return args[self.left.n_inputs: self.left.n_inputs + self.right.n_inputs] elif op == '|' or op == 'fix_inputs': return None else: return args[:self.left.n_inputs] def _get_left_params_from_args(self, args): op = self.op if op == '&': # Args expected to look like (*left inputs, *right inputs, *left params, *right params) n_inputs = self.left.n_inputs + self.right.n_inputs return args[n_inputs: n_inputs + self.n_left_params] else: return args[self.left.n_inputs: self.left.n_inputs + self.n_left_params] def _get_right_params_from_args(self, args): op = self.op if op == 'fix_inputs': return None if op == '&': # Args expected to look like (*left inputs, *right inputs, *left params, *right params) return args[self.left.n_inputs + self.right.n_inputs + self.n_left_params:] else: return args[self.left.n_inputs + self.n_left_params:] def _get_kwarg_model_parameters_as_positional(self, args, kwargs): # could do it with inserts but rebuilding seems like simpilist way # TODO: Check if any param names are in kwargs maybe as an intersection of sets? 
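        # For illustration, when ``op == '&'`` the flattened positional
        # layout handled below is
        #
        #     (*left_inputs, *right_inputs, *left_params, *right_params)
        #
        # e.g. for ``Shift(1) & Shift(2)`` evaluated at inputs (x1, x2) the
        # args would be (x1, x2, 1.0, 2.0).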
if self.op == "&": new_args = list(args[:self.left.n_inputs + self.right.n_inputs]) args_pos = self.left.n_inputs + self.right.n_inputs else: new_args = list(args[:self.left.n_inputs]) args_pos = self.left.n_inputs for param_name in self.param_names: kw_value = kwargs.pop(param_name, None) if kw_value is not None: value = kw_value else: try: value = args[args_pos] except IndexError: raise IndexError("Missing parameter or input") args_pos += 1 new_args.append(value) return new_args, kwargs def _apply_operators_to_value_lists(self, leftval, rightval, **kw): op = self.op if op == '+': return binary_operation(operator.add, leftval, rightval) elif op == '-': return binary_operation(operator.sub, leftval, rightval) elif op == '*': return binary_operation(operator.mul, leftval, rightval) elif op == '/': return binary_operation(operator.truediv, leftval, rightval) elif op == '**': return binary_operation(operator.pow, leftval, rightval) elif op == '&': if not isinstance(leftval, tuple): leftval = (leftval,) if not isinstance(rightval, tuple): rightval = (rightval,) return leftval + rightval elif op in SPECIAL_OPERATORS: return binary_operation(SPECIAL_OPERATORS[op], leftval, rightval) else: raise ModelDefinitionError('Unrecognized operator {op}') def evaluate(self, *args, **kw): op = self.op args, kw = self._get_kwarg_model_parameters_as_positional(args, kw) left_inputs = self._get_left_inputs_from_args(args) left_params = self._get_left_params_from_args(args) if op == 'fix_inputs': pos_index = dict(zip(self.left.inputs, range(self.left.n_inputs))) fixed_inputs = { key if np.issubdtype(type(key), np.integer) else pos_index[key]: value for key, value in self.right.items() } left_inputs = [ fixed_inputs[ind] if ind in fixed_inputs.keys() else inp for ind, inp in enumerate(left_inputs) ] leftval = self.left.evaluate(*itertools.chain(left_inputs, left_params)) if op == 'fix_inputs': return leftval right_inputs = self._get_right_inputs_from_args(args) right_params = self._get_right_params_from_args(args) if op == "|": if isinstance(leftval, tuple): return self.right.evaluate(*itertools.chain(leftval, right_params)) else: return self.right.evaluate(leftval, *right_params) else: rightval = self.right.evaluate(*itertools.chain(right_inputs, right_params)) return self._apply_operators_to_value_lists(leftval, rightval, **kw) @property def n_submodels(self): if self._leaflist is None: self._make_leaflist() return len(self._leaflist) @property def submodel_names(self): """ Return the names of submodels in a ``CompoundModel``.""" if self._leaflist is None: self._make_leaflist() names = [item.name for item in self._leaflist] nonecount = 0 newnames = [] for item in names: if item is None: newnames.append(f'None_{nonecount}') nonecount += 1 else: newnames.append(item) return tuple(newnames) def both_inverses_exist(self): """ if both members of this compound model have inverses return True """ import warnings from astropy.utils.exceptions import AstropyDeprecationWarning warnings.warn( "CompoundModel.both_inverses_exist is deprecated. " "Use has_inverse instead.", AstropyDeprecationWarning ) try: self.left.inverse self.right.inverse except NotImplementedError: return False return True def _pre_evaluate(self, *args, **kwargs): """ CompoundModel specific input setup that needs to occur prior to model evaluation. Note ---- All of the _pre_evaluate for each component model will be performed at the time that the individual model is evaluated. 
""" # If equivalencies are provided, necessary to map parameters and pass # the leaflist as a keyword input for use by model evaluation so that # the compound model input names can be matched to the model input # names. if 'equivalencies' in kwargs: # Restructure to be useful for the individual model lookup kwargs['inputs_map'] = [(value[0], (value[1], key)) for key, value in self.inputs_map().items()] # Setup actual model evaluation method def evaluate(_inputs): return self._evaluate(*_inputs, **kwargs) return evaluate, args, None, kwargs @property def _argnames(self): """No inputs should be used to determine input_shape when handling compound models""" return () def _post_evaluate(self, inputs, outputs, broadcasted_shapes, with_bbox, **kwargs): """ CompoundModel specific post evaluation processing of outputs Note ---- All of the _post_evaluate for each component model will be performed at the time that the individual model is evaluated. """ if self.get_bounding_box(with_bbox) is not None and self.n_outputs == 1: return outputs[0] return outputs def _evaluate(self, *args, **kw): op = self.op if op != 'fix_inputs': if op != '&': leftval = self.left(*args, **kw) if op != '|': rightval = self.right(*args, **kw) else: rightval = None else: leftval = self.left(*(args[:self.left.n_inputs]), **kw) rightval = self.right(*(args[self.left.n_inputs:]), **kw) if op != "|": return self._apply_operators_to_value_lists(leftval, rightval, **kw) elif op == '|': if isinstance(leftval, tuple): return self.right(*leftval, **kw) else: return self.right(leftval, **kw) else: subs = self.right newargs = list(args) subinds = [] subvals = [] for key in subs.keys(): if np.issubdtype(type(key), np.integer): subinds.append(key) elif isinstance(key, str): ind = self.left.inputs.index(key) subinds.append(ind) subvals.append(subs[key]) # Turn inputs specified in kw into positional indices. # Names for compound inputs do not propagate to sub models. kwind = [] kwval = [] for kwkey in list(kw.keys()): if kwkey in self.inputs: ind = self.inputs.index(kwkey) if ind < len(args): raise ValueError("Keyword argument duplicates " "positional value supplied.") kwind.append(ind) kwval.append(kw[kwkey]) del kw[kwkey] # Build new argument list # Append keyword specified args first if kwind: kwargs = list(zip(kwind, kwval)) kwargs.sort() kwindsorted, kwvalsorted = list(zip(*kwargs)) newargs = newargs + list(kwvalsorted) if subinds: subargs = list(zip(subinds, subvals)) subargs.sort() # subindsorted, subvalsorted = list(zip(*subargs)) # The substitutions must be inserted in order for ind, val in subargs: newargs.insert(ind, val) return self.left(*newargs, **kw) @property def param_names(self): """ An ordered list of parameter names.""" return self._param_names def _make_leaflist(self): tdict = {} leaflist = [] make_subtree_dict(self, '', tdict, leaflist) self._leaflist = leaflist self._tdict = tdict def __getattr__(self, name): """ If someone accesses an attribute not already defined, map the parameters, and then see if the requested attribute is one of the parameters """ # The following test is needed to avoid infinite recursion # caused by deepcopy. There may be other such cases discovered. 
        if name == '__setstate__':
            raise AttributeError
        if name in self._param_names:
            return self.__dict__[name]
        else:
            raise AttributeError(f'Attribute "{name}" not found')

    def __getitem__(self, index):
        if self._leaflist is None:
            self._make_leaflist()
        leaflist = self._leaflist
        tdict = self._tdict
        if isinstance(index, slice):
            if index.step:
                raise ValueError('Steps in slices not supported '
                                 'for compound models')
            if index.start is not None:
                if isinstance(index.start, str):
                    start = self._str_index_to_int(index.start)
                else:
                    start = index.start
            else:
                start = 0
            if index.stop is not None:
                if isinstance(index.stop, str):
                    stop = self._str_index_to_int(index.stop)
                else:
                    stop = index.stop - 1
            else:
                stop = len(leaflist) - 1
            if index.stop == 0:
                raise ValueError("Slice endpoint cannot be 0")
            if start < 0:
                start = len(leaflist) + start
            if stop < 0:
                stop = len(leaflist) + stop
            # now search for matching node:
            if stop == start:  # only single value, get leaf instead in code below
                index = start
            else:
                for key in tdict:
                    node, leftind, rightind = tdict[key]
                    if leftind == start and rightind == stop:
                        return node
                raise IndexError("No appropriate subtree matches slice")
        if np.issubdtype(type(index), np.integer):
            return leaflist[index]
        elif isinstance(index, str):
            return leaflist[self._str_index_to_int(index)]
        else:
            raise TypeError('index must be integer, slice, or model name string')

    def _str_index_to_int(self, str_index):
        # Search through leaflist for item with that name
        found = []
        for nleaf, leaf in enumerate(self._leaflist):
            if getattr(leaf, 'name', None) == str_index:
                found.append(nleaf)
        if len(found) == 0:
            raise IndexError(f"No component with name '{str_index}' found")
        if len(found) > 1:
            raise IndexError(f"Multiple components found using '{str_index}' as name\n"
                             f"at indices {found}")
        return found[0]

    @property
    def n_inputs(self):
        """ The number of inputs of a model."""
        return self._n_inputs

    @n_inputs.setter
    def n_inputs(self, value):
        self._n_inputs = value

    @property
    def n_outputs(self):
        """ The number of outputs of a model."""
        return self._n_outputs

    @n_outputs.setter
    def n_outputs(self, value):
        self._n_outputs = value

    @property
    def eqcons(self):
        return self._eqcons

    @eqcons.setter
    def eqcons(self, value):
        self._eqcons = value

    @property
    def ineqcons(self):
        return self._ineqcons

    @ineqcons.setter
    def ineqcons(self, value):
        self._ineqcons = value

    def traverse_postorder(self, include_operator=False):
        """ Postorder traversal of the CompoundModel tree."""
        res = []
        if isinstance(self.left, CompoundModel):
            res = res + self.left.traverse_postorder(include_operator)
        else:
            res = res + [self.left]
        if isinstance(self.right, CompoundModel):
            res = res + self.right.traverse_postorder(include_operator)
        else:
            res = res + [self.right]
        if include_operator:
            res.append(self.op)
        else:
            res.append(self)
        return res

    def _format_expression(self, format_leaf=None):
        leaf_idx = 0
        operands = deque()

        if format_leaf is None:
            format_leaf = lambda i, l: f'[{i}]'  # noqa: E731

        for node in self.traverse_postorder():
            if not isinstance(node, CompoundModel):
                operands.append(format_leaf(leaf_idx, node))
                leaf_idx += 1
                continue

            right = operands.pop()
            left = operands.pop()
            if node.op in OPERATOR_PRECEDENCE:
                oper_order = OPERATOR_PRECEDENCE[node.op]

                if isinstance(node, CompoundModel):
                    if (isinstance(node.left, CompoundModel) and
                            OPERATOR_PRECEDENCE[node.left.op] < oper_order):
                        left = f'({left})'
                    if (isinstance(node.right, CompoundModel) and
                            OPERATOR_PRECEDENCE[node.right.op] < oper_order):
                        right = f'({right})'

                operands.append(' '.join((left, node.op, right)))
            else:
                left
= f'(({left}),' right = f'({right}))' operands.append(' '.join((node.op[0], left, right))) return ''.join(operands) def _format_components(self): if self._parameters_ is None: self._map_parameters() return "\n\n".join(f"[{idx}]: {m!r}" for idx, m in enumerate(self._leaflist)) def __str__(self): expression = self._format_expression() components = self._format_components() keywords = [ ('Expression', expression), ('Components', '\n' + indent(components)) ] return super()._format_str(keywords=keywords) def rename(self, name): self.name = name return self @property def isleaf(self): return False @property def inverse(self): if self.op == '|': return self.right.inverse | self.left.inverse elif self.op == '&': return self.left.inverse & self.right.inverse else: return NotImplemented @property def fittable(self): """ Set the fittable attribute on a compound model.""" if self._fittable is None: if self._leaflist is None: self._map_parameters() self._fittable = all(m.fittable for m in self._leaflist) return self._fittable __add__ = _model_oper('+') __sub__ = _model_oper('-') __mul__ = _model_oper('*') __truediv__ = _model_oper('/') __pow__ = _model_oper('**') __or__ = _model_oper('|') __and__ = _model_oper('&') def _map_parameters(self): """ Map all the constituent model parameters to the compound object, renaming as necessary by appending a suffix number. This can be an expensive operation, particularly for a complex expression tree. All the corresponding parameter attributes are created that one expects for the Model class. The parameter objects that the attributes point to are the same objects as in the constiutent models. Changes made to parameter values to either are seen by both. Prior to calling this, none of the associated attributes will exist. This method must be called to make the model usable by fitting engines. If oldnames=True, then parameters are named as in the original implementation of compound models. """ if self._parameters is not None: # do nothing return if self._leaflist is None: self._make_leaflist() self._parameters_ = {} param_map = {} self._param_names = [] for lindex, leaf in enumerate(self._leaflist): if not isinstance(leaf, dict): for param_name in leaf.param_names: param = getattr(leaf, param_name) new_param_name = f"{param_name}_{lindex}" self.__dict__[new_param_name] = param self._parameters_[new_param_name] = param self._param_names.append(new_param_name) param_map[new_param_name] = (lindex, param_name) self._param_metrics = {} self._param_map = param_map self._param_map_inverse = dict((v, k) for k, v in param_map.items()) self._initialize_slices() self._param_names = tuple(self._param_names) def _initialize_slices(self): param_metrics = self._param_metrics total_size = 0 for name in self.param_names: param = getattr(self, name) value = param.value param_size = np.size(value) param_shape = np.shape(value) param_slice = slice(total_size, total_size + param_size) param_metrics[name] = {} param_metrics[name]['slice'] = param_slice param_metrics[name]['shape'] = param_shape param_metrics[name]['size'] = param_size total_size += param_size self._parameters = np.empty(total_size, dtype=np.float64) @staticmethod def _recursive_lookup(branch, adict, key): if isinstance(branch, CompoundModel): return adict[key] return branch, key def inputs_map(self): """ Map the names of the inputs to this ExpressionTree to the inputs to the leaf models. 
""" inputs_map = {} if not isinstance(self.op, str): # If we don't have an operator the mapping is trivial return {inp: (self, inp) for inp in self.inputs} elif self.op == '|': if isinstance(self.left, CompoundModel): l_inputs_map = self.left.inputs_map() for inp in self.inputs: if isinstance(self.left, CompoundModel): inputs_map[inp] = l_inputs_map[inp] else: inputs_map[inp] = self.left, inp elif self.op == '&': if isinstance(self.left, CompoundModel): l_inputs_map = self.left.inputs_map() if isinstance(self.right, CompoundModel): r_inputs_map = self.right.inputs_map() for i, inp in enumerate(self.inputs): if i < len(self.left.inputs): # Get from left if isinstance(self.left, CompoundModel): inputs_map[inp] = l_inputs_map[self.left.inputs[i]] else: inputs_map[inp] = self.left, self.left.inputs[i] else: # Get from right if isinstance(self.right, CompoundModel): inputs_map[inp] = r_inputs_map[self.right.inputs[i - len(self.left.inputs)]] else: inputs_map[inp] = self.right, self.right.inputs[i - len(self.left.inputs)] elif self.op == 'fix_inputs': fixed_ind = list(self.right.keys()) ind = [list(self.left.inputs).index(i) if isinstance(i, str) else i for i in fixed_ind] inp_ind = list(range(self.left.n_inputs)) for i in ind: inp_ind.remove(i) for i in inp_ind: inputs_map[self.left.inputs[i]] = self.left, self.left.inputs[i] else: if isinstance(self.left, CompoundModel): l_inputs_map = self.left.inputs_map() for inp in self.left.inputs: if isinstance(self.left, CompoundModel): inputs_map[inp] = l_inputs_map[inp] else: inputs_map[inp] = self.left, inp return inputs_map def _parameter_units_for_data_units(self, input_units, output_units): if self._leaflist is None: self._map_parameters() units_for_data = {} for imodel, model in enumerate(self._leaflist): units_for_data_leaf = model._parameter_units_for_data_units(input_units, output_units) for param_leaf in units_for_data_leaf: param = self._param_map_inverse[(imodel, param_leaf)] units_for_data[param] = units_for_data_leaf[param_leaf] return units_for_data @property def input_units(self): inputs_map = self.inputs_map() input_units_dict = {key: inputs_map[key][0].input_units[orig_key] for key, (mod, orig_key) in inputs_map.items() if inputs_map[key][0].input_units is not None} if input_units_dict: return input_units_dict return None @property def input_units_equivalencies(self): inputs_map = self.inputs_map() input_units_equivalencies_dict = { key: inputs_map[key][0].input_units_equivalencies[orig_key] for key, (mod, orig_key) in inputs_map.items() if inputs_map[key][0].input_units_equivalencies is not None } if not input_units_equivalencies_dict: return None return input_units_equivalencies_dict @property def input_units_allow_dimensionless(self): inputs_map = self.inputs_map() return {key: inputs_map[key][0].input_units_allow_dimensionless[orig_key] for key, (mod, orig_key) in inputs_map.items()} @property def input_units_strict(self): inputs_map = self.inputs_map() return {key: inputs_map[key][0].input_units_strict[orig_key] for key, (mod, orig_key) in inputs_map.items()} @property def return_units(self): outputs_map = self.outputs_map() return {key: outputs_map[key][0].return_units[orig_key] for key, (mod, orig_key) in outputs_map.items() if outputs_map[key][0].return_units is not None} def outputs_map(self): """ Map the names of the outputs to this ExpressionTree to the outputs to the leaf models. 
""" outputs_map = {} if not isinstance(self.op, str): # If we don't have an operator the mapping is trivial return {out: (self, out) for out in self.outputs} elif self.op == '|': if isinstance(self.right, CompoundModel): r_outputs_map = self.right.outputs_map() for out in self.outputs: if isinstance(self.right, CompoundModel): outputs_map[out] = r_outputs_map[out] else: outputs_map[out] = self.right, out elif self.op == '&': if isinstance(self.left, CompoundModel): l_outputs_map = self.left.outputs_map() if isinstance(self.right, CompoundModel): r_outputs_map = self.right.outputs_map() for i, out in enumerate(self.outputs): if i < len(self.left.outputs): # Get from left if isinstance(self.left, CompoundModel): outputs_map[out] = l_outputs_map[self.left.outputs[i]] else: outputs_map[out] = self.left, self.left.outputs[i] else: # Get from right if isinstance(self.right, CompoundModel): outputs_map[out] = r_outputs_map[self.right.outputs[ i - len(self.left.outputs)]] else: outputs_map[out] = self.right, self.right.outputs[ i - len(self.left.outputs)] elif self.op == 'fix_inputs': return self.left.outputs_map() else: if isinstance(self.left, CompoundModel): l_outputs_map = self.left.outputs_map() for out in self.left.outputs: if isinstance(self.left, CompoundModel): outputs_map[out] = l_outputs_map()[out] else: outputs_map[out] = self.left, out return outputs_map @property def has_user_bounding_box(self): """ A flag indicating whether or not a custom bounding_box has been assigned to this model by a user, via assignment to ``model.bounding_box``. """ return self._user_bounding_box is not None def render(self, out=None, coords=None): """ Evaluate a model at fixed positions, respecting the ``bounding_box``. The key difference relative to evaluating the model directly is that this method is limited to a bounding box if the `Model.bounding_box` attribute is set. Parameters ---------- out : `numpy.ndarray`, optional An array that the evaluated model will be added to. If this is not given (or given as ``None``), a new array will be created. coords : array-like, optional An array to be used to translate from the model's input coordinates to the ``out`` array. It should have the property that ``self(coords)`` yields the same shape as ``out``. If ``out`` is not specified, ``coords`` will be used to determine the shape of the returned array. If this is not provided (or None), the model will be evaluated on a grid determined by `Model.bounding_box`. Returns ------- out : `numpy.ndarray` The model added to ``out`` if ``out`` is not ``None``, or else a new array from evaluating the model over ``coords``. If ``out`` and ``coords`` are both `None`, the returned array is limited to the `Model.bounding_box` limits. If `Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed. Raises ------ ValueError If ``coords`` are not given and the the `Model.bounding_box` of this model is not set. 
Examples -------- :ref:`astropy:bounding-boxes` """ bbox = self.get_bounding_box() ndim = self.n_inputs if (coords is None) and (out is None) and (bbox is None): raise ValueError('If no bounding_box is set, ' 'coords or out must be input.') # for consistent indexing if ndim == 1: if coords is not None: coords = [coords] if bbox is not None: bbox = [bbox] if coords is not None: coords = np.asanyarray(coords, dtype=float) # Check dimensions match out and model assert len(coords) == ndim if out is not None: if coords[0].shape != out.shape: raise ValueError('inconsistent shape of the output.') else: out = np.zeros(coords[0].shape) if out is not None: out = np.asanyarray(out) if out.ndim != ndim: raise ValueError('the array and model must have the same ' 'number of dimensions.') if bbox is not None: # Assures position is at center pixel, important when using # add_array. pd = np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2)) for bb in bbox]).astype(int).T pos, delta = pd if coords is not None: sub_shape = tuple(delta * 2 + 1) sub_coords = np.array([extract_array(c, sub_shape, pos) for c in coords]) else: limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T] sub_coords = np.mgrid[limits] sub_coords = sub_coords[::-1] if out is None: out = self(*sub_coords) else: try: out = add_array(out, self(*sub_coords), pos) except ValueError: raise ValueError( 'The `bounding_box` is larger than the input out in ' 'one or more dimensions. Set ' '`model.bounding_box = None`.') else: if coords is None: im_shape = out.shape limits = [slice(i) for i in im_shape] coords = np.mgrid[limits] coords = coords[::-1] out += self(*coords) return out def replace_submodel(self, name, model): """ Construct a new `~astropy.modeling.CompoundModel` instance from an existing CompoundModel, replacing the named submodel with a new model. In order to ensure that inverses and names are kept/reconstructed, it's necessary to rebuild the CompoundModel from the replaced node all the way back to the base. The original CompoundModel is left untouched. Parameters ---------- name : str name of submodel to be replaced model : `~astropy.modeling.Model` replacement model """ submodels = [m for m in self.traverse_postorder() if getattr(m, 'name', None) == name] if submodels: if len(submodels) > 1: raise ValueError(f"More than one submodel named {name}") old_model = submodels.pop() if len(old_model) != len(model): raise ValueError("New and old models must have equal values " "for n_models") # Do this check first in order to raise a more helpful Exception, # although it would fail trying to construct the new CompoundModel if (old_model.n_inputs != model.n_inputs or old_model.n_outputs != model.n_outputs): raise ValueError("New model must match numbers of inputs and " "outputs of existing model") tree = _get_submodel_path(self, name) while tree: branch = self.copy() for node in tree[:-1]: branch = getattr(branch, node) setattr(branch, tree[-1], model) model = CompoundModel(branch.op, branch.left, branch.right, name=branch.name) tree = tree[:-1] return model else: raise ValueError(f"No submodels found named {name}") def _set_sub_models_and_parameter_units(self, left, right): """ Provides a work-around to properly set the sub models and respective parameters's units/values when using ``without_units_for_data`` or ``without_units_for_data`` methods. 
""" model = CompoundModel(self.op, left, right) self.left = left self.right = right for name in model.param_names: model_parameter = getattr(model, name) parameter = getattr(self, name) parameter.value = model_parameter.value parameter._set_unit(model_parameter.unit, force=True) def without_units_for_data(self, **kwargs): """ See `~astropy.modeling.Model.without_units_for_data` for overview of this method. Notes ----- This modifies the behavior of the base method to account for the case where the sub-models of a compound model have different output units. This is only valid for compound * and / compound models as in that case it is reasonable to mix the output units. It does this by modifying the output units of each sub model by using the output units of the other sub model so that we can apply the original function and get the desired result. Additional data has to be output in the mixed output unit case so that the units can be properly rebuilt by `~astropy.modeling.CompoundModel.with_units_from_data`. Outside the mixed output units, this method is identical to the base method. """ if self.op in ['*', '/']: model = self.copy() inputs = {inp: kwargs[inp] for inp in self.inputs} left_units = self.left.output_units(**kwargs) right_units = self.right.output_units(**kwargs) if self.op == '*': left_kwargs = {out: kwargs[out] / right_units[out] for out in self.left.outputs if kwargs[out] is not None} right_kwargs = {out: kwargs[out] / left_units[out] for out in self.right.outputs if kwargs[out] is not None} else: left_kwargs = {out: kwargs[out] * right_units[out] for out in self.left.outputs if kwargs[out] is not None} right_kwargs = {out: 1 / kwargs[out] * left_units[out] for out in self.right.outputs if kwargs[out] is not None} left_kwargs.update(inputs.copy()) right_kwargs.update(inputs.copy()) left = self.left.without_units_for_data(**left_kwargs) if isinstance(left, tuple): left_kwargs['_left_kwargs'] = left[1] left_kwargs['_right_kwargs'] = left[2] left = left[0] right = self.right.without_units_for_data(**right_kwargs) if isinstance(right, tuple): right_kwargs['_left_kwargs'] = right[1] right_kwargs['_right_kwargs'] = right[2] right = right[0] model._set_sub_models_and_parameter_units(left, right) return model, left_kwargs, right_kwargs else: return super().without_units_for_data(**kwargs) def with_units_from_data(self, **kwargs): """ See `~astropy.modeling.Model.with_units_from_data` for overview of this method. Notes ----- This modifies the behavior of the base method to account for the case where the sub-models of a compound model have different output units. This is only valid for compound * and / compound models as in that case it is reasonable to mix the output units. In order to do this it requires some additional information output by `~astropy.modeling.CompoundModel.without_units_for_data` passed as keyword arguments under the keywords ``_left_kwargs`` and ``_right_kwargs``. Outside the mixed output units, this method is identical to the base method. 
""" if self.op in ['*', '/']: left_kwargs = kwargs.pop('_left_kwargs') right_kwargs = kwargs.pop('_right_kwargs') left = self.left.with_units_from_data(**left_kwargs) right = self.right.with_units_from_data(**right_kwargs) model = self.copy() model._set_sub_models_and_parameter_units(left, right) return model else: return super().with_units_from_data(**kwargs) def _get_submodel_path(model, name): """Find the route down a CompoundModel's tree to the model with the specified name (whether it's a leaf or not)""" if getattr(model, 'name', None) == name: return [] try: return ['left'] + _get_submodel_path(model.left, name) except (AttributeError, TypeError): pass try: return ['right'] + _get_submodel_path(model.right, name) except (AttributeError, TypeError): pass def binary_operation(binoperator, left, right): ''' Perform binary operation. Operands may be matching tuples of operands. ''' if isinstance(left, tuple) and isinstance(right, tuple): return tuple([binoperator(item[0], item[1]) for item in zip(left, right)]) return binoperator(left, right) def get_ops(tree, opset): """ Recursive function to collect operators used. """ if isinstance(tree, CompoundModel): opset.add(tree.op) get_ops(tree.left, opset) get_ops(tree.right, opset) else: return def make_subtree_dict(tree, nodepath, tdict, leaflist): ''' Traverse a tree noting each node by a key that indicates all the left/right choices necessary to reach that node. Each key will reference a tuple that contains: - reference to the compound model for that node. - left most index contained within that subtree (relative to all indices for the whole tree) - right most index contained within that subtree ''' # if this is a leaf, just append it to the leaflist if not hasattr(tree, 'isleaf'): leaflist.append(tree) else: leftmostind = len(leaflist) make_subtree_dict(tree.left, nodepath+'l', tdict, leaflist) make_subtree_dict(tree.right, nodepath+'r', tdict, leaflist) rightmostind = len(leaflist)-1 tdict[nodepath] = (tree, leftmostind, rightmostind) _ORDER_OF_OPERATORS = [('fix_inputs',), ('|',), ('&',), ('+', '-'), ('*', '/'), ('**',)] OPERATOR_PRECEDENCE = {} for idx, ops in enumerate(_ORDER_OF_OPERATORS): for op in ops: OPERATOR_PRECEDENCE[op] = idx del idx, op, ops def fix_inputs(modelinstance, values, bounding_boxes=None, selector_args=None): """ This function creates a compound model with one or more of the input values of the input model assigned fixed values (scalar or array). Parameters ---------- modelinstance : `~astropy.modeling.Model` instance This is the model that one or more of the model input values will be fixed to some constant value. values : dict A dictionary where the key identifies which input to fix and its value is the value to fix it at. The key may either be the name of the input or a number reflecting its order in the inputs. 
Examples -------- >>> from astropy.modeling.models import Gaussian2D >>> g = Gaussian2D(1, 2, 3, 4, 5) >>> gv = fix_inputs(g, {0: 2.5}) Results in a 1D function equivalent to Gaussian2D(1, 2, 3, 4, 5)(x=2.5, y) """ model = CompoundModel('fix_inputs', modelinstance, values) if bounding_boxes is not None: if selector_args is None: selector_args = tuple([(key, True) for key in values.keys()]) bbox = CompoundBoundingBox.validate(modelinstance, bounding_boxes, selector_args) _selector = bbox.selector_args.get_fixed_values(modelinstance, values) new_bbox = bbox[_selector] new_bbox = new_bbox.__class__.validate(model, new_bbox) model.bounding_box = new_bbox return model def bind_bounding_box(modelinstance, bounding_box, ignored=None, order='C'): """ Set a validated bounding box to a model instance. Parameters ---------- modelinstance : `~astropy.modeling.Model` instance This is the model that the validated bounding box will be set on. bounding_box : tuple A bounding box tuple, see :ref:`astropy:bounding-boxes` for details ignored : list List of the inputs to be ignored by the bounding box. order : str, optional The ordering of the bounding box tuple, can be either ``'C'`` or ``'F'``. """ modelinstance.bounding_box = ModelBoundingBox.validate(modelinstance, bounding_box, ignored=ignored, order=order) def bind_compound_bounding_box(modelinstance, bounding_boxes, selector_args, create_selector=None, ignored=None, order='C'): """ Add a validated compound bounding box to a model instance. Parameters ---------- modelinstance : `~astropy.modeling.Model` instance This is the model that the validated compound bounding box will be set on. bounding_boxes : dict A dictionary of bounding box tuples, see :ref:`astropy:bounding-boxes` for details. selector_args : list List of selector argument tuples to define selection for compound bounding box, see :ref:`astropy:bounding-boxes` for details. create_selector : callable, optional An optional callable with interface (selector_value, model) which can generate a bounding box based on a selector value and model if there is no bounding box in the compound bounding box listed under that selector value. Default is ``None``, meaning new bounding box entries will not be automatically generated. ignored : list List of the inputs to be ignored by the bounding box. order : str, optional The ordering of the bounding box tuple, can be either ``'C'`` or ``'F'``. """ modelinstance.bounding_box = CompoundBoundingBox.validate(modelinstance, bounding_boxes, selector_args, create_selector=create_selector, ignored=ignored, order=order) def custom_model(*args, fit_deriv=None): """ Create a model from a user defined function. The inputs and parameters of the model will be inferred from the arguments of the function. This can be used either as a function or as a decorator. See below for examples of both usages. The model is separable only if there is a single input. .. note:: All model parameters have to be defined as keyword arguments with default values in the model function. Use `None` as a default argument value if you do not want to have a default value for that parameter. The standard settable model properties can be configured by default using keyword arguments matching the name of the property; however, these values are not set as model "parameters". Moreover, users cannot use keyword arguments matching non-settable model properties, with the exception of ``n_outputs`` which should be set to the number of outputs of your function. 
Parameters ---------- func : function Function which defines the model. It should take N positional arguments where ``N`` is dimensions of the model (the number of independent variable in the model), and any number of keyword arguments (the parameters). It must return the value of the model (typically as an array, but can also be a scalar for scalar inputs). This corresponds to the `~astropy.modeling.Model.evaluate` method. fit_deriv : function, optional Function which defines the Jacobian derivative of the model. I.e., the derivative with respect to the *parameters* of the model. It should have the same argument signature as ``func``, but should return a sequence where each element of the sequence is the derivative with respect to the corresponding argument. This corresponds to the :meth:`~astropy.modeling.FittableModel.fit_deriv` method. Examples -------- Define a sinusoidal model function as a custom 1D model:: >>> from astropy.modeling.models import custom_model >>> import numpy as np >>> def sine_model(x, amplitude=1., frequency=1.): ... return amplitude * np.sin(2 * np.pi * frequency * x) >>> def sine_deriv(x, amplitude=1., frequency=1.): ... return 2 * np.pi * amplitude * np.cos(2 * np.pi * frequency * x) >>> SineModel = custom_model(sine_model, fit_deriv=sine_deriv) Create an instance of the custom model and evaluate it:: >>> model = SineModel() >>> model(0.25) 1.0 This model instance can now be used like a usual astropy model. The next example demonstrates a 2D Moffat function model, and also demonstrates the support for docstrings (this example could also include a derivative, but it has been omitted for simplicity):: >>> @custom_model ... def Moffat2D(x, y, amplitude=1.0, x_0=0.0, y_0=0.0, gamma=1.0, ... alpha=1.0): ... \"\"\"Two dimensional Moffat function.\"\"\" ... rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2 ... return amplitude * (1 + rr_gg) ** (-alpha) ... >>> print(Moffat2D.__doc__) Two dimensional Moffat function. >>> model = Moffat2D() >>> model(1, 1) # doctest: +FLOAT_CMP 0.3333333333333333 """ if len(args) == 1 and callable(args[0]): return _custom_model_wrapper(args[0], fit_deriv=fit_deriv) elif not args: return functools.partial(_custom_model_wrapper, fit_deriv=fit_deriv) else: raise TypeError( f"{__name__} takes at most one positional argument (the callable/" "function to be turned into a model. When used as a decorator " "it should be passed keyword arguments only (if " "any).") def _custom_model_inputs(func): """ Processes the inputs to the `custom_model`'s function into the appropriate categories. 
Parameters ---------- func : callable Returns ------- inputs : list list of evaluation inputs special_params : dict dictionary of model properties which require special treatment settable_params : dict dictionary of defaults for settable model properties params : dict dictionary of model parameters set by `custom_model`'s function """ inputs, parameters = get_inputs_and_params(func) special = ['n_outputs'] settable = [attr for attr, value in vars(Model).items() if isinstance(value, property) and value.fset is not None] properties = [attr for attr, value in vars(Model).items() if isinstance(value, property) and value.fset is None and attr not in special] special_params = {} settable_params = {} params = {} for param in parameters: if param.name in special: special_params[param.name] = param.default elif param.name in settable: settable_params[param.name] = param.default elif param.name in properties: raise ValueError(f"Parameter '{param.name}' cannot be a model property: {properties}.") else: params[param.name] = param.default return inputs, special_params, settable_params, params def _custom_model_wrapper(func, fit_deriv=None): """ Internal implementation `custom_model`. When `custom_model` is called as a function its arguments are passed to this function, and the result of this function is returned. When `custom_model` is used as a decorator a partial evaluation of this function is returned by `custom_model`. """ if not callable(func): raise ModelDefinitionError( "func is not callable; it must be a function or other callable " "object") if fit_deriv is not None and not callable(fit_deriv): raise ModelDefinitionError( "fit_deriv not callable; it must be a function or other " "callable object") model_name = func.__name__ inputs, special_params, settable_params, params = _custom_model_inputs(func) if (fit_deriv is not None and len(fit_deriv.__defaults__) != len(params)): raise ModelDefinitionError("derivative function should accept " "same number of parameters as func.") params = {param: Parameter(param, default=default) for param, default in params.items()} mod = find_current_module(2) if mod: modname = mod.__name__ else: modname = '__main__' members = { '__module__': str(modname), '__doc__': func.__doc__, 'n_inputs': len(inputs), 'n_outputs': special_params.pop('n_outputs', 1), 'evaluate': staticmethod(func), '_settable_properties': settable_params } if fit_deriv is not None: members['fit_deriv'] = staticmethod(fit_deriv) members.update(params) cls = type(model_name, (FittableModel,), members) cls._separable = True if (len(inputs) == 1) else False return cls def render_model(model, arr=None, coords=None): """ Evaluates a model on an input array. Evaluation is limited to a bounding box if the `Model.bounding_box` attribute is set. Parameters ---------- model : `Model` Model to be evaluated. arr : `numpy.ndarray`, optional Array on which the model is evaluated. coords : array-like, optional Coordinate arrays mapping to ``arr``, such that ``arr[coords] == arr``. Returns ------- array : `numpy.ndarray` The model evaluated on the input ``arr`` or a new array from ``coords``. If ``arr`` and ``coords`` are both `None`, the returned array is limited to the `Model.bounding_box` limits. If `Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed. 
    Examples
    --------
    :ref:`astropy:bounding-boxes`
    """

    bbox = model.bounding_box

    if (coords is None) and (arr is None) and (bbox is None):
        raise ValueError('If no bounding_box is set, '
                         'coords or arr must be input.')

    # for consistent indexing
    if model.n_inputs == 1:
        if coords is not None:
            coords = [coords]
        if bbox is not None:
            bbox = [bbox]

    if arr is not None:
        arr = arr.copy()
        # Check dimensions match model
        if arr.ndim != model.n_inputs:
            raise ValueError('number of array dimensions inconsistent with '
                             'number of model inputs.')
    if coords is not None:
        # Check dimensions match arr and model
        coords = np.array(coords)
        if len(coords) != model.n_inputs:
            raise ValueError('coordinate length inconsistent with the number '
                             'of model inputs.')
        if arr is not None:
            if coords[0].shape != arr.shape:
                raise ValueError('coordinate shape inconsistent with the '
                                 'array shape.')
        else:
            arr = np.zeros(coords[0].shape)

    if bbox is not None:
        # assures position is at center pixel, important when using add_array
        pd = pos, delta = np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2))
                                    for bb in bbox]).astype(int).T

        if coords is not None:
            sub_shape = tuple(delta * 2 + 1)
            sub_coords = np.array([extract_array(c, sub_shape, pos)
                                   for c in coords])
        else:
            limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
            sub_coords = np.mgrid[limits]

        sub_coords = sub_coords[::-1]

        if arr is None:
            arr = model(*sub_coords)
        else:
            try:
                arr = add_array(arr, model(*sub_coords), pos)
            except ValueError:
                raise ValueError('The `bounding_box` is larger than the input'
                                 ' arr in one or more dimensions. Set '
                                 '`model.bounding_box = None`.')
    else:
        if coords is None:
            im_shape = arr.shape
            limits = [slice(i) for i in im_shape]
            coords = np.mgrid[limits]

        arr += model(*coords[::-1])

    return arr


def hide_inverse(model):
    """
    This is a convenience function intended to disable automatic generation
    of the inverse in compound models by disabling one of the constituent
    model's inverse. This is to handle cases where user provided inverse
    functions are not compatible within an expression.

    For example::

        compound_model.inverse = hide_inverse(m1) + m2 + m3

    This will ensure that the defined inverse itself won't attempt to
    build its own inverse, which would otherwise fail in this example
    (e.g., m = m1 + m2 + m3 happens to raise an exception for this
    reason).

    Note that this permanently disables it. To prevent that, either copy
    the model or restore the inverse later.
    """
    del model.inverse
    return model
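# ---------------------------------------------------------------------------
# Usage sketch (hedged, not part of the library source above): illustrates
# `CompoundModel.replace_submodel`, which rebuilds the expression tree from
# the replaced node back to the root. The model choices, names, and parameter
# values below are illustrative assumptions, not fixtures from this module.
#
#     >>> from astropy.modeling import models
#     >>> g = models.Gaussian1D(1, 0, 0.2, name='g')
#     >>> m = models.Shift(1, name='shift') | g
#     >>> m2 = m.replace_submodel('g', models.Lorentz1D(1, 0, 0.2, name='lz'))
#     >>> m2.submodel_names
#     ('shift', 'lz')
#
# The original compound model `m` is left untouched, as documented above.
# ---------------------------------------------------------------------------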
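# ---------------------------------------------------------------------------
# Usage sketch (hedged): `fix_inputs` freezes one input of an existing model;
# the resulting compound model exposes only the remaining free inputs. The
# model and the fixed value below are illustrative.
#
#     >>> from astropy.modeling.models import Gaussian2D
#     >>> from astropy.modeling import fix_inputs
#     >>> g = Gaussian2D(1, 2, 3, 4, 5)
#     >>> gy = fix_inputs(g, {'x': 2.5})   # x is pinned; a 1D model in y remains
#     >>> gy.inputs
#     ('y',)
# ---------------------------------------------------------------------------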
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Convolution Model"""
# pylint: disable=line-too-long, too-many-lines, too-many-arguments, invalid-name

import numpy as np

from .core import CompoundModel


class Convolution(CompoundModel):
    """
    Wrapper class for a convolution model.

    Parameters
    ----------
    operator : tuple
        The SPECIAL_OPERATORS entry for the convolution being used.
    model : Model
        The model for the convolution.
    kernel : Model
        The kernel model for the convolution.
    bounding_box : tuple
        A bounding box to define the limits of the integration
        approximation for the convolution.
    resolution : float
        The resolution for the approximation of the convolution.
    cache : bool, optional
        Allow convolution computation to be cached for reuse. This is
        enabled by default.

    Notes
    -----
    This wrapper is necessary to handle the limitations of the
    pseudospectral convolution binary operator implemented in
    astropy.convolution under `~astropy.convolution.convolve_fft`. In
    `~astropy.convolution.convolve_fft` it is assumed that the inputs
    ``array`` and ``kernel`` span a sufficient portion of the support of the
    functions of the convolution. Consequently, the compound model created
    by the `~astropy.convolution.convolve_models` function makes the
    assumption that one should pass an input array that sufficiently spans
    this space. This means that slightly different input arrays to this
    model will result in different outputs, even on points of intersection
    between these arrays.

    This issue is solved by requiring a ``bounding_box`` together with a
    resolution so that one can pre-calculate the entire domain and then
    (by default) cache the convolution values. The function then just
    interpolates the results from this cache.
    """

    def __init__(self, operator, model, kernel, bounding_box, resolution, cache=True):
        super().__init__(operator, model, kernel)

        self.bounding_box = bounding_box
        self._resolution = resolution

        self._cache_convolution = cache
        self._kwargs = None
        self._convolution = None

    def clear_cache(self):
        """
        Clears the cached convolution
        """
        self._kwargs = None
        self._convolution = None

    def _get_convolution(self, **kwargs):
        if (self._convolution is None) or (self._kwargs != kwargs):
            domain = self.bounding_box.domain(self._resolution)
            mesh = np.meshgrid(*domain)
            data = super().__call__(*mesh, **kwargs)

            from scipy.interpolate import RegularGridInterpolator
            convolution = RegularGridInterpolator(domain, data)

            if self._cache_convolution:
                self._kwargs = kwargs
                self._convolution = convolution
        else:
            convolution = self._convolution

        return convolution

    @staticmethod
    def _convolution_inputs(*args):
        not_scalar = np.where([not np.isscalar(arg) for arg in args])[0]

        if len(not_scalar) == 0:
            return np.array(args), (1,)
        else:
            output_shape = args[not_scalar[0]].shape
            if not all(args[index].shape == output_shape for index in not_scalar):
                raise ValueError('Values have differing shapes')

            inputs = []
            for arg in args:
                if np.isscalar(arg):
                    inputs.append(np.full(output_shape, arg))
                else:
                    inputs.append(arg)

            return np.reshape(inputs, (len(inputs), -1)).T, output_shape

    @staticmethod
    def _convolution_outputs(outputs, output_shape):
        return outputs.reshape(output_shape)

    def __call__(self, *args, **kw):
        inputs, output_shape = self._convolution_inputs(*args)
        convolution = self._get_convolution(**kw)
        outputs = convolution(inputs)

        return self._convolution_outputs(outputs, output_shape)
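# ---------------------------------------------------------------------------
# Usage sketch (hedged): the Notes above describe why `Convolution` needs a
# bounding box and resolution. This sketch assumes the
# `astropy.convolution.convolve_models_fft` helper returns an instance of the
# `Convolution` wrapper; the models, bounding box, and resolution are
# illustrative.
#
#     >>> import numpy as np
#     >>> from astropy.convolution import convolve_models_fft
#     >>> from astropy.modeling.models import Gaussian1D
#     >>> conv = convolve_models_fft(Gaussian1D(1, 0, 1), Gaussian1D(1, 0, 1),
#     ...                            bounding_box=(-5, 5), resolution=0.01)
#     >>> x = np.linspace(-4, 4, 81)
#     >>> y = conv(x)   # interpolated from the cached, pre-computed domain
# ---------------------------------------------------------------------------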
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This subpackage provides a framework for representing models and performing model evaluation and fitting. It supports 1D and 2D models and fitting with parameter constraints. It has some predefined models and fitting routines. """ from . import fitting, models # noqa: F401, F403 from .core import * # noqa: F401, F403 from .parameters import * # noqa: F401, F403 from .separable import * # noqa: F401, F403
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Creates a common namespace for all pre-defined models.
"""
# pylint: disable=unused-wildcard-import, unused-import, wildcard-import

from . import math_functions as math  # noqa: F401, F403
from .core import custom_model, fix_inputs, hide_inverse  # pylint: disable=W0611 # noqa: F401, F403
from .functional_models import *  # noqa: F401, F403
from .mappings import *  # noqa: F401, F403
from .physical_models import *  # noqa: F401, F403
from .polynomial import *  # noqa: F401, F403
from .powerlaws import *  # noqa: F401, F403
from .projections import *  # noqa: F401, F403
from .rotations import *  # noqa: F401, F403
from .spline import *  # noqa: F401, F403
from .tabular import *  # noqa: F401, F403

# Attach a docstring explaining constraints to all models which support them.
# Note: add new models to this list

CONSTRAINTS_DOC = """
    Other Parameters
    ----------------
    fixed : dict, optional
        A dictionary ``{parameter_name: boolean}`` of parameters to not be
        varied during fitting. True means the parameter is held fixed.
        Alternatively the `~astropy.modeling.Parameter.fixed`
        property of a parameter may be used.
    tied : dict, optional
        A dictionary ``{parameter_name: callable}`` of parameters which are
        linked to some other parameter. The dictionary values are callables
        providing the linking relationship. Alternatively the
        `~astropy.modeling.Parameter.tied` property of a parameter
        may be used.
    bounds : dict, optional
        A dictionary ``{parameter_name: value}`` of lower and upper bounds of
        parameters. Keys are parameter names. Values are a list or a tuple
        of length 2 giving the desired range for the parameter.
        Alternatively, the
        `~astropy.modeling.Parameter.min` and
        `~astropy.modeling.Parameter.max` properties of a parameter
        may be used.
    eqcons : list, optional
        A list of functions of length ``n`` such that
        ``eqcons[j](x0, *args) == 0.0`` in a successfully optimized problem.
    ineqcons : list, optional
        A list of functions of length ``n`` such that
        ``ineqcons[j](x0, *args) >= 0.0`` in a successfully optimized problem.
"""


MODELS_WITH_CONSTRAINTS = [
    AiryDisk2D, Moffat1D, Moffat2D, Box1D, Box2D,  # noqa: F405
    Const1D, Const2D, Ellipse2D, Disk2D,  # noqa: F405
    Gaussian1D, Gaussian2D,  # noqa: F405
    Linear1D, Lorentz1D, RickerWavelet1D, RickerWavelet2D,  # noqa: F405
    PowerLaw1D, Sersic1D, Sersic2D,  # noqa: F405
    Sine1D, Cosine1D, Tangent1D, ArcSine1D, ArcCosine1D, ArcTangent1D,  # noqa: F405
    Trapezoid1D, TrapezoidDisk2D,  # noqa: F405
    Chebyshev1D, Chebyshev2D, Hermite1D, Hermite2D, Legendre2D, Legendre1D,  # noqa: F405
    Polynomial1D, Polynomial2D, Voigt1D, KingProjectedAnalytic1D,  # noqa: F405
    NFW  # noqa: F405
]

for item in MODELS_WITH_CONSTRAINTS:
    if isinstance(item.__doc__, str):
        item.__doc__ += CONSTRAINTS_DOC
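# ---------------------------------------------------------------------------
# Usage sketch (hedged): how the constraint keywords documented in
# CONSTRAINTS_DOC above are passed at model construction. Parameter values
# are illustrative.
#
#     >>> from astropy.modeling import models
#     >>> g = models.Gaussian1D(amplitude=10., mean=3., stddev=1.,
#     ...                       fixed={'mean': True},
#     ...                       bounds={'stddev': (0.5, 2.0)})
#     >>> g.mean.fixed
#     True
#     >>> g.stddev.bounds
#     (0.5, 2.0)
# ---------------------------------------------------------------------------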
""" Special models useful for complex compound models where control is needed over which outputs from a source model are mapped to which inputs of a target model. """ # pylint: disable=invalid-name from astropy.units import Quantity from .core import FittableModel, Model __all__ = ['Mapping', 'Identity', 'UnitsMapping'] class Mapping(FittableModel): """ Allows inputs to be reordered, duplicated or dropped. Parameters ---------- mapping : tuple A tuple of integers representing indices of the inputs to this model to return and in what order to return them. See :ref:`astropy:compound-model-mappings` for more details. n_inputs : int Number of inputs; if `None` (default) then ``max(mapping) + 1`` is used (i.e. the highest input index used in the mapping). name : str, optional A human-friendly name associated with this model instance (particularly useful for identifying the individual components of a compound model). meta : dict-like Free-form metadata to associate with this model. Raises ------ TypeError Raised when number of inputs is less that ``max(mapping)``. Examples -------- >>> from astropy.modeling.models import Polynomial2D, Shift, Mapping >>> poly1 = Polynomial2D(1, c0_0=1, c1_0=2, c0_1=3) >>> poly2 = Polynomial2D(1, c0_0=1, c1_0=2.4, c0_1=2.1) >>> model = (Shift(1) & Shift(2)) | Mapping((0, 1, 0, 1)) | (poly1 & poly2) >>> model(1, 2) # doctest: +FLOAT_CMP (17.0, 14.2) """ linear = True # FittableModel is non-linear by default def __init__(self, mapping, n_inputs=None, name=None, meta=None): self._inputs = () self._outputs = () if n_inputs is None: self._n_inputs = max(mapping) + 1 else: self._n_inputs = n_inputs self._n_outputs = len(mapping) super().__init__(name=name, meta=meta) self.inputs = tuple('x' + str(idx) for idx in range(self._n_inputs)) self.outputs = tuple('x' + str(idx) for idx in range(self._n_outputs)) self._mapping = mapping self._input_units_strict = {key: False for key in self._inputs} self._input_units_allow_dimensionless = {key: False for key in self._inputs} @property def n_inputs(self): return self._n_inputs @property def n_outputs(self): return self._n_outputs @property def mapping(self): """Integers representing indices of the inputs.""" return self._mapping def __repr__(self): if self.name is None: return f'<Mapping({self.mapping})>' return f'<Mapping({self.mapping}, name={self.name!r})>' def evaluate(self, *args): if len(args) != self.n_inputs: name = self.name if self.name is not None else "Mapping" raise TypeError(f'{name} expects {self.n_inputs} inputs; got {len(args)}') result = tuple(args[idx] for idx in self._mapping) if self.n_outputs == 1: return result[0] return result @property def inverse(self): """ A `Mapping` representing the inverse of the current mapping. Raises ------ `NotImplementedError` An inverse does no exist on mappings that drop some of its inputs (there is then no way to reconstruct the inputs that were dropped). """ try: mapping = tuple(self.mapping.index(idx) for idx in range(self.n_inputs)) except ValueError: raise NotImplementedError( f"Mappings such as {self.mapping} that drop one or more of their inputs " "are not invertible at this time.") inv = self.__class__(mapping) inv._inputs = self._outputs inv._outputs = self._inputs inv._n_inputs = len(inv._inputs) inv._n_outputs = len(inv._outputs) return inv class Identity(Mapping): """ Returns inputs unchanged. This class is useful in compound models when some of the inputs must be passed unchanged to the next model. 
Parameters ---------- n_inputs : int Specifies the number of inputs this identity model accepts. name : str, optional A human-friendly name associated with this model instance (particularly useful for identifying the individual components of a compound model). meta : dict-like Free-form metadata to associate with this model. Examples -------- Transform ``(x, y)`` by a shift in x, followed by scaling the two inputs:: >>> from astropy.modeling.models import (Polynomial1D, Shift, Scale, ... Identity) >>> model = (Shift(1) & Identity(1)) | Scale(1.2) & Scale(2) >>> model(1,1) # doctest: +FLOAT_CMP (2.4, 2.0) >>> model.inverse(2.4, 2) # doctest: +FLOAT_CMP (1.0, 1.0) """ linear = True # FittableModel is non-linear by default def __init__(self, n_inputs, name=None, meta=None): mapping = tuple(range(n_inputs)) super().__init__(mapping, name=name, meta=meta) def __repr__(self): if self.name is None: return f'<Identity({self.n_inputs})>' return f'<Identity({self.n_inputs}, name={self.name!r})>' @property def inverse(self): """ The inverse transformation. In this case of `Identity`, ``self.inverse is self``. """ return self class UnitsMapping(Model): """ Mapper that operates on the units of the input, first converting to canonical units, then assigning new units without further conversion. Used by Model.coerce_units to support units on otherwise unitless models such as Polynomial1D. Parameters ---------- mapping : tuple A tuple of (input_unit, output_unit) pairs, one per input, matched to the inputs by position. The first element of the each pair is the unit that the model will accept (specify ``dimensionless_unscaled`` to accept dimensionless input). The second element is the unit that the model will return. Specify ``dimensionless_unscaled`` to return dimensionless Quantity, and `None` to return raw values without Quantity. input_units_equivalencies : dict, optional Default equivalencies to apply to input values. If set, this should be a dictionary where each key is a string that corresponds to one of the model inputs. input_units_allow_dimensionless : dict or bool, optional Allow dimensionless input. If this is True, input values to evaluate will gain the units specified in input_units. If this is a dictionary then it should map input name to a bool to allow dimensionless numbers for that input. name : str, optional A human-friendly name associated with this model instance (particularly useful for identifying the individual components of a compound model). meta : dict-like, optional Free-form metadata to associate with this model. Examples -------- Wrapping a unitless model to require and convert units: >>> from astropy.modeling.models import Polynomial1D, UnitsMapping >>> from astropy import units as u >>> poly = Polynomial1D(1, c0=1, c1=2) >>> model = UnitsMapping(((u.m, None),)) | poly >>> model = model | UnitsMapping(((None, u.s),)) >>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP <Quantity 21. s> >>> model(u.Quantity(1000, u.cm)) # doctest: +FLOAT_CMP <Quantity 21. s> >>> model(u.Quantity(10, u.cm)) # doctest: +FLOAT_CMP <Quantity 1.2 s> Wrapping a unitless model but still permitting unitless input: >>> from astropy.modeling.models import Polynomial1D, UnitsMapping >>> from astropy import units as u >>> poly = Polynomial1D(1, c0=1, c1=2) >>> model = UnitsMapping(((u.m, None),), input_units_allow_dimensionless=True) | poly >>> model = model | UnitsMapping(((None, u.s),)) >>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP <Quantity 21. s> >>> model(10) # doctest: +FLOAT_CMP <Quantity 21. 
s> """ def __init__( self, mapping, input_units_equivalencies=None, input_units_allow_dimensionless=False, name=None, meta=None ): self._mapping = mapping none_mapping_count = len([m for m in mapping if m[-1] is None]) if none_mapping_count > 0 and none_mapping_count != len(mapping): raise ValueError("If one return unit is None, then all must be None") # These attributes are read and handled by Model self._input_units_strict = True self.input_units_equivalencies = input_units_equivalencies self._input_units_allow_dimensionless = input_units_allow_dimensionless super().__init__(name=name, meta=meta) # Can't invoke this until after super().__init__, since # we need self.inputs and self.outputs to be populated. self._rebuild_units() def _rebuild_units(self): self._input_units = {input_name: input_unit for input_name, (input_unit, _) in zip(self.inputs, self.mapping)} @property def n_inputs(self): return len(self._mapping) @property def n_outputs(self): return len(self._mapping) @property def inputs(self): return super().inputs @inputs.setter def inputs(self, value): super(UnitsMapping, self.__class__).inputs.fset(self, value) self._rebuild_units() @property def outputs(self): return super().outputs @outputs.setter def outputs(self, value): super(UnitsMapping, self.__class__).outputs.fset(self, value) self._rebuild_units() @property def input_units(self): return self._input_units @property def mapping(self): return self._mapping def evaluate(self, *args): result = [] for arg, (_, return_unit) in zip(args, self.mapping): if isinstance(arg, Quantity): value = arg.value else: value = arg if return_unit is None: result.append(value) else: result.append(Quantity(value, return_unit, subok=True)) if self.n_outputs == 1: return result[0] else: return tuple(result) def __repr__(self): if self.name is None: return f"<UnitsMapping({self.mapping})>" else: return f"<UnitsMapping({self.mapping}, name={self.name!r})>"
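# ---------------------------------------------------------------------------
# Usage sketch (hedged): `Mapping` reorders or duplicates inputs, as
# described in its docstring above; the values below are illustrative.
#
#     >>> from astropy.modeling.models import Mapping
#     >>> swap = Mapping((1, 0))             # swap two inputs
#     >>> swap(1.0, 2.0)
#     (2.0, 1.0)
#     >>> dup = Mapping((0, 0), n_inputs=1)  # duplicate a single input
#     >>> dup(3.0)
#     (3.0, 3.0)
# ---------------------------------------------------------------------------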
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Implements rotations, including spherical rotations as defined in WCS Paper II [1]_ `RotateNative2Celestial` and `RotateCelestial2Native` follow the convention in WCS Paper II to rotate to/from a native sphere and the celestial sphere. The implementation uses `EulerAngleRotation`. The model parameters are three angles: the longitude (``lon``) and latitude (``lat``) of the fiducial point in the celestial system (``CRVAL`` keywords in FITS), and the longitude of the celestial pole in the native system (``lon_pole``). The Euler angles are ``lon+90``, ``90-lat`` and ``-(lon_pole-90)``. References ---------- .. [1] Calabretta, M.R., Greisen, E.W., 2002, A&A, 395, 1077 (Paper II) """ # pylint: disable=invalid-name, too-many-arguments, no-member import math import numpy as np from astropy import units as u from astropy.coordinates.matrix_utilities import matrix_product, rotation_matrix from .core import Model from .parameters import Parameter from .utils import _to_orig_unit, _to_radian __all__ = ['RotateCelestial2Native', 'RotateNative2Celestial', 'Rotation2D', 'EulerAngleRotation', 'RotationSequence3D', 'SphericalRotationSequence'] def _create_matrix(angles, axes_order): matrices = [] for angle, axis in zip(angles, axes_order): if isinstance(angle, u.Quantity): angle = angle.value angle = angle.item() matrices.append(rotation_matrix(angle, axis, unit=u.rad)) result = matrix_product(*matrices[::-1]) return result def spherical2cartesian(alpha, delta): alpha = np.deg2rad(alpha) delta = np.deg2rad(delta) x = np.cos(alpha) * np.cos(delta) y = np.cos(delta) * np.sin(alpha) z = np.sin(delta) return np.array([x, y, z]) def cartesian2spherical(x, y, z): h = np.hypot(x, y) alpha = np.rad2deg(np.arctan2(y, x)) delta = np.rad2deg(np.arctan2(z, h)) return alpha, delta class RotationSequence3D(Model): """ Perform a series of rotations about different axis in 3D space. Positive angles represent a counter-clockwise rotation. Parameters ---------- angles : array-like Angles of rotation in deg in the order of axes_order. axes_order : str A sequence of 'x', 'y', 'z' corresponding to axis of rotation. Examples -------- >>> model = RotationSequence3D([1.1, 2.1, 3.1, 4.1], axes_order='xyzx') """ standard_broadcasting = False _separable = False n_inputs = 3 n_outputs = 3 angles = Parameter(default=[], getter=_to_orig_unit, setter=_to_radian, description="Angles of rotation in deg in the order of axes_order") def __init__(self, angles, axes_order, name=None): self.axes = ['x', 'y', 'z'] unrecognized = set(axes_order).difference(self.axes) if unrecognized: raise ValueError(f"Unrecognized axis label {unrecognized}; " f"should be one of {self.axes} ") self.axes_order = axes_order if len(angles) != len(axes_order): raise ValueError(f"The number of angles {len(angles)} should match " f"the number of axes {len(axes_order)}.") super().__init__(angles, name=name) self._inputs = ('x', 'y', 'z') self._outputs = ('x', 'y', 'z') @property def inverse(self): """Inverse rotation.""" angles = self.angles.value[::-1] * -1 return self.__class__(angles, axes_order=self.axes_order[::-1]) def evaluate(self, x, y, z, angles): """ Apply the rotation to a set of 3D Cartesian coordinates. 
""" if x.shape != y.shape or x.shape != z.shape: raise ValueError("Expected input arrays to have the same shape") # Note: If the original shape was () (an array scalar) convert to a # 1-element 1-D array on output for consistency with most other models orig_shape = x.shape or (1,) inarr = np.array([x.flatten(), y.flatten(), z.flatten()]) result = np.dot(_create_matrix(angles[0], self.axes_order), inarr) x, y, z = result[0], result[1], result[2] x.shape = y.shape = z.shape = orig_shape return x, y, z class SphericalRotationSequence(RotationSequence3D): """ Perform a sequence of rotations about arbitrary number of axes in spherical coordinates. Parameters ---------- angles : list A sequence of angles (in deg). axes_order : str A sequence of characters ('x', 'y', or 'z') corresponding to the axis of rotation and matching the order in ``angles``. """ def __init__(self, angles, axes_order, name=None, **kwargs): self._n_inputs = 2 self._n_outputs = 2 super().__init__(angles, axes_order=axes_order, name=name, **kwargs) self._inputs = ("lon", "lat") self._outputs = ("lon", "lat") @property def n_inputs(self): return self._n_inputs @property def n_outputs(self): return self._n_outputs def evaluate(self, lon, lat, angles): x, y, z = spherical2cartesian(lon, lat) x1, y1, z1 = super().evaluate(x, y, z, angles) lon, lat = cartesian2spherical(x1, y1, z1) return lon, lat class _EulerRotation: """ Base class which does the actual computation. """ _separable = False def evaluate(self, alpha, delta, phi, theta, psi, axes_order): shape = None if isinstance(alpha, np.ndarray): alpha = alpha.flatten() delta = delta.flatten() shape = alpha.shape inp = spherical2cartesian(alpha, delta) matrix = _create_matrix([phi, theta, psi], axes_order) result = np.dot(matrix, inp) a, b = cartesian2spherical(*result) if shape is not None: a.shape = shape b.shape = shape return a, b _input_units_strict = True _input_units_allow_dimensionless = True @property def input_units(self): """ Input units. """ return {self.inputs[0]: u.deg, self.inputs[1]: u.deg} @property def return_units(self): """ Output units. """ return {self.outputs[0]: u.deg, self.outputs[1]: u.deg} class EulerAngleRotation(_EulerRotation, Model): """ Implements Euler angle intrinsic rotations. Rotates one coordinate system into another (fixed) coordinate system. All coordinate systems are right-handed. The sign of the angles is determined by the right-hand rule.. Parameters ---------- phi, theta, psi : float or `~astropy.units.Quantity` ['angle'] "proper" Euler angles in deg. If floats, they should be in deg. axes_order : str A 3 character string, a combination of 'x', 'y' and 'z', where each character denotes an axis in 3D space. 
""" n_inputs = 2 n_outputs = 2 phi = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian, description="1st Euler angle (Quantity or value in deg)") theta = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian, description="2nd Euler angle (Quantity or value in deg)") psi = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian, description="3rd Euler angle (Quantity or value in deg)") def __init__(self, phi, theta, psi, axes_order, **kwargs): self.axes = ['x', 'y', 'z'] if len(axes_order) != 3: raise TypeError( "Expected axes_order to be a character sequence of length 3, " f"got {axes_order}") unrecognized = set(axes_order).difference(self.axes) if unrecognized: raise ValueError(f"Unrecognized axis label {unrecognized}; " f"should be one of {self.axes}") self.axes_order = axes_order qs = [isinstance(par, u.Quantity) for par in [phi, theta, psi]] if any(qs) and not all(qs): raise TypeError("All parameters should be of the same type - float or Quantity.") super().__init__(phi=phi, theta=theta, psi=psi, **kwargs) self._inputs = ('alpha', 'delta') self._outputs = ('alpha', 'delta') @property def inverse(self): return self.__class__(phi=-self.psi, theta=-self.theta, psi=-self.phi, axes_order=self.axes_order[::-1]) def evaluate(self, alpha, delta, phi, theta, psi): a, b = super().evaluate(alpha, delta, phi, theta, psi, self.axes_order) return a, b class _SkyRotation(_EulerRotation, Model): """ Base class for RotateNative2Celestial and RotateCelestial2Native. """ lon = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian, description="Latitude") lat = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian, description="Longtitude") lon_pole = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian, description="Longitude of a pole") def __init__(self, lon, lat, lon_pole, **kwargs): qs = [isinstance(par, u.Quantity) for par in [lon, lat, lon_pole]] if any(qs) and not all(qs): raise TypeError("All parameters should be of the same type - float or Quantity.") super().__init__(lon, lat, lon_pole, **kwargs) self.axes_order = 'zxz' def _evaluate(self, phi, theta, lon, lat, lon_pole): alpha, delta = super().evaluate(phi, theta, lon, lat, lon_pole, self.axes_order) mask = alpha < 0 if isinstance(mask, np.ndarray): alpha[mask] += 360 else: alpha += 360 return alpha, delta class RotateNative2Celestial(_SkyRotation): """ Transform from Native to Celestial Spherical Coordinates. Parameters ---------- lon : float or `~astropy.units.Quantity` ['angle'] Celestial longitude of the fiducial point. lat : float or `~astropy.units.Quantity` ['angle'] Celestial latitude of the fiducial point. lon_pole : float or `~astropy.units.Quantity` ['angle'] Longitude of the celestial pole in the native system. Notes ----- If ``lon``, ``lat`` and ``lon_pole`` are numerical values they should be in units of deg. Inputs are angles on the native sphere. Outputs are angles on the celestial sphere. """ n_inputs = 2 n_outputs = 2 @property def input_units(self): """ Input units. """ return {self.inputs[0]: u.deg, self.inputs[1]: u.deg} @property def return_units(self): """ Output units. """ return {self.outputs[0]: u.deg, self.outputs[1]: u.deg} def __init__(self, lon, lat, lon_pole, **kwargs): super().__init__(lon, lat, lon_pole, **kwargs) self.inputs = ('phi_N', 'theta_N') self.outputs = ('alpha_C', 'delta_C') def evaluate(self, phi_N, theta_N, lon, lat, lon_pole): """ Parameters ---------- phi_N, theta_N : float or `~astropy.units.Quantity` ['angle'] Angles in the Native coordinate system. 
            If float, assumed in degrees.
        lon, lat, lon_pole : float or `~astropy.units.Quantity` ['angle']
            Parameter values when the model was initialized.
            If float, assumed in degrees.

        Returns
        -------
        alpha_C, delta_C : float or `~astropy.units.Quantity` ['angle']
            Angles on the Celestial sphere.
            If float, in degrees.
        """
        # The values are in radians since they have already been through the setter.
        if isinstance(lon, u.Quantity):
            lon = lon.value
            lat = lat.value
            lon_pole = lon_pole.value
        # Convert to Euler angles
        phi = lon_pole - np.pi / 2
        theta = -(np.pi / 2 - lat)
        psi = -(np.pi / 2 + lon)
        alpha_C, delta_C = super()._evaluate(phi_N, theta_N, phi, theta, psi)
        return alpha_C, delta_C

    @property
    def inverse(self):
        # convert to angles on the celestial sphere
        return RotateCelestial2Native(self.lon, self.lat, self.lon_pole)


class RotateCelestial2Native(_SkyRotation):
    """
    Transform from Celestial to Native Spherical Coordinates.

    Parameters
    ----------
    lon : float or `~astropy.units.Quantity` ['angle']
        Celestial longitude of the fiducial point.
    lat : float or `~astropy.units.Quantity` ['angle']
        Celestial latitude of the fiducial point.
    lon_pole : float or `~astropy.units.Quantity` ['angle']
        Longitude of the celestial pole in the native system.

    Notes
    -----
    If ``lon``, ``lat`` and ``lon_pole`` are numerical values they
    should be in units of deg. Inputs are angles on the celestial sphere.
    Outputs are angles on the native sphere.
    """

    n_inputs = 2
    n_outputs = 2

    @property
    def input_units(self):
        """ Input units. """
        return {self.inputs[0]: u.deg,
                self.inputs[1]: u.deg}

    @property
    def return_units(self):
        """ Output units. """
        return {self.outputs[0]: u.deg,
                self.outputs[1]: u.deg}

    def __init__(self, lon, lat, lon_pole, **kwargs):
        super().__init__(lon, lat, lon_pole, **kwargs)
        # Inputs are angles on the celestial sphere
        self.inputs = ('alpha_C', 'delta_C')
        # Outputs are angles on the native sphere
        self.outputs = ('phi_N', 'theta_N')

    def evaluate(self, alpha_C, delta_C, lon, lat, lon_pole):
        """
        Parameters
        ----------
        alpha_C, delta_C : float or `~astropy.units.Quantity` ['angle']
            Angles in the Celestial coordinate frame.
            If float, assumed in degrees.
        lon, lat, lon_pole : float or `~astropy.units.Quantity` ['angle']
            Parameter values when the model was initialized.
            If float, assumed in degrees.

        Returns
        -------
        phi_N, theta_N : float or `~astropy.units.Quantity` ['angle']
            Angles on the Native sphere.
            If float, in degrees.
        """
        if isinstance(lon, u.Quantity):
            lon = lon.value
            lat = lat.value
            lon_pole = lon_pole.value
        # Convert to Euler angles
        phi = (np.pi / 2 + lon)
        theta = (np.pi / 2 - lat)
        psi = -(lon_pole - np.pi / 2)
        phi_N, theta_N = super()._evaluate(alpha_C, delta_C, phi, theta, psi)
        return phi_N, theta_N

    @property
    def inverse(self):
        return RotateNative2Celestial(self.lon, self.lat, self.lon_pole)


class Rotation2D(Model):
    """
    Perform a 2D rotation given an angle.

    Positive angles rotate counter-clockwise; negative angles rotate
    clockwise.

    Parameters
    ----------
    angle : float or `~astropy.units.Quantity` ['angle']
        Angle of rotation (if float it should be in deg).
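    Examples
    --------
    An illustrative rotation of the point (1, 0) by 90 deg
    counter-clockwise (the result is exact up to floating point):

    >>> import numpy as np
    >>> from astropy.modeling.models import Rotation2D
    >>> rot = Rotation2D(angle=90)
    >>> x, y = rot(1, 0)
    >>> bool(np.allclose([np.ravel(x)[0], np.ravel(y)[0]], [0.0, 1.0]))
    True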
""" n_inputs = 2 n_outputs = 2 _separable = False angle = Parameter(default=0.0, getter=_to_orig_unit, setter=_to_radian, description="Angle of rotation (Quantity or value in deg)") def __init__(self, angle=angle, **kwargs): super().__init__(angle=angle, **kwargs) self._inputs = ("x", "y") self._outputs = ("x", "y") @property def inverse(self): """Inverse rotation.""" return self.__class__(angle=-self.angle) @classmethod def evaluate(cls, x, y, angle): """ Rotate (x, y) about ``angle``. Parameters ---------- x, y : array-like Input quantities angle : float or `~astropy.units.Quantity` ['angle'] Angle of rotations. If float, assumed in degrees. """ if x.shape != y.shape: raise ValueError("Expected input arrays to have the same shape") # If one argument has units, enforce they both have units and they are compatible. x_unit = getattr(x, 'unit', None) y_unit = getattr(y, 'unit', None) has_units = x_unit is not None and y_unit is not None if x_unit != y_unit: if has_units and y_unit.is_equivalent(x_unit): y = y.to(x_unit) y_unit = x_unit else: raise u.UnitsError("x and y must have compatible units") # Note: If the original shape was () (an array scalar) convert to a # 1-element 1-D array on output for consistency with most other models orig_shape = x.shape or (1,) inarr = np.array([x.flatten(), y.flatten()]) if isinstance(angle, u.Quantity): angle = angle.to_value(u.rad) result = np.dot(cls._compute_matrix(angle), inarr) x, y = result[0], result[1] x.shape = y.shape = orig_shape if has_units: return u.Quantity(x, unit=x_unit), u.Quantity(y, unit=y_unit) return x, y @staticmethod def _compute_matrix(angle): return np.array([[math.cos(angle), -math.sin(angle)], [math.sin(angle), math.cos(angle)]], dtype=np.float64)
131cb50c86a6b04a36a57600d759fde99b3b0b50d5619ef7ac8d99548c3bcce6
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Define Numpy Ufuncs as Models. """ import numpy as np from astropy.modeling.core import Model trig_ufuncs = ["sin", "cos", "tan", "arcsin", "arccos", "arctan", "arctan2", "hypot", "sinh", "cosh", "tanh", "arcsinh", "arccosh", "arctanh", "deg2rad", "rad2deg"] math_ops = ["add", "subtract", "multiply", "logaddexp", "logaddexp2", "true_divide", "floor_divide", "negative", "positive", "power", "remainder", "fmod", "divmod", "absolute", "fabs", "rint", "exp", "exp2", "log", "log2", "log10", "expm1", "log1p", "sqrt", "square", "cbrt", "reciprocal", "divide", "mod"] supported_ufuncs = trig_ufuncs + math_ops # These names are just aliases for other ufunc objects # in the numpy API. The alias name must occur later # in the lists above. alias_ufuncs = { "divide": "true_divide", "mod": "remainder", } class _NPUfuncModel(Model): _is_dynamic = True def __init__(self, **kwargs): super().__init__(**kwargs) def _make_class_name(name): """ Make a ufunc model class name from the name of the ufunc. """ return name[0].upper() + name[1:] + 'Ufunc' def ufunc_model(name): """ Define a Model from a Numpy ufunc name.""" ufunc = getattr(np, name) nin = ufunc.nin nout = ufunc.nout if nin == 1: separable = True def evaluate(self, x): return self.func(x) else: separable = False def evaluate(self, x, y): return self.func(x, y) klass_name = _make_class_name(name) members = {'n_inputs': nin, 'n_outputs': nout, 'func': ufunc, 'linear': False, 'fittable': False, '_separable': separable, '_is_dynamic': True, 'evaluate': evaluate} klass = type(str(klass_name), (_NPUfuncModel,), members) klass.__module__ = 'astropy.modeling.math_functions' return klass __all__ = [] for name in supported_ufuncs: if name in alias_ufuncs: klass_name = _make_class_name(name) alias_klass_name = _make_class_name(alias_ufuncs[name]) globals()[klass_name] = globals()[alias_klass_name] __all__.append(klass_name) else: m = ufunc_model(name) klass_name = m.__name__ globals()[klass_name] = m __all__.append(klass_name)
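

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module API): the dynamically
# generated classes behave like ordinary models and can be composed with the
# usual model operators.  ``SinUfunc`` and ``AbsoluteUfunc`` exist because
# 'sin' and 'absolute' appear in ``supported_ufuncs`` above.
if __name__ == '__main__':  # pragma: no cover
    model = SinUfunc() | AbsoluteUfunc()   # evaluates |sin(x)|
    assert np.isclose(model(-np.pi / 2), 1.0)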
9f127a86991fe59aab3eed36464d8c7a993227e5a881e05d8fb732d93ddb976d
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Functions to determine if a model is separable, i.e. if the model outputs are independent. It analyzes ``n_inputs``, ``n_outputs`` and the operators in a compound model by stepping through the transforms and creating a ``coord_matrix`` of shape (``n_outputs``, ``n_inputs``). Each modeling operator is represented by a function which takes two simple models (or two ``coord_matrix`` arrays) and returns an array of shape (``n_outputs``, ``n_inputs``). """ import numpy as np from .core import CompoundModel, Model, ModelDefinitionError from .mappings import Mapping __all__ = ["is_separable", "separability_matrix"] def is_separable(transform): """ A separability test for the outputs of a transform. Parameters ---------- transform : `~astropy.modeling.core.Model` A (compound) model. Returns ------- is_separable : ndarray A boolean array with size ``transform.n_outputs`` where each element indicates whether the output is independent and the result of a separable transform. Examples -------- >>> from astropy.modeling.models import Shift, Scale, Rotation2D, Polynomial2D >>> is_separable(Shift(1) & Shift(2) | Scale(1) & Scale(2)) array([ True, True]...) >>> is_separable(Shift(1) & Shift(2) | Rotation2D(2)) array([False, False]...) >>> is_separable(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1]) | \ Polynomial2D(1) & Polynomial2D(2)) array([False, False]...) >>> is_separable(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1])) array([ True, True, True, True]...) """ if transform.n_inputs == 1 and transform.n_outputs > 1: is_separable = np.array([False] * transform.n_outputs).T return is_separable separable_matrix = _separable(transform) is_separable = separable_matrix.sum(1) is_separable = np.where(is_separable != 1, False, True) return is_separable def separability_matrix(transform): """ Compute the correlation between outputs and inputs. Parameters ---------- transform : `~astropy.modeling.core.Model` A (compound) model. Returns ------- separable_matrix : ndarray A boolean correlation matrix of shape (n_outputs, n_inputs). Indicates the dependence of outputs on inputs. For completely independent outputs, the diagonal elements are True and off-diagonal elements are False. Examples -------- >>> from astropy.modeling.models import Shift, Scale, Rotation2D, Polynomial2D >>> separability_matrix(Shift(1) & Shift(2) | Scale(1) & Scale(2)) array([[ True, False], [False, True]]...) >>> separability_matrix(Shift(1) & Shift(2) | Rotation2D(2)) array([[ True, True], [ True, True]]...) >>> separability_matrix(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1]) | \ Polynomial2D(1) & Polynomial2D(2)) array([[ True, True], [ True, True]]...) >>> separability_matrix(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1])) array([[ True, False], [False, True], [ True, False], [False, True]]...) """ if transform.n_inputs == 1 and transform.n_outputs > 1: return np.ones((transform.n_outputs, transform.n_inputs), dtype=np.bool_) separable_matrix = _separable(transform) separable_matrix = np.where(separable_matrix != 0, True, False) return separable_matrix def _compute_n_outputs(left, right): """ Compute the number of outputs of two models. The two models are the left and right model to an operation in the expression tree of a compound model. Parameters ---------- left, right : `astropy.modeling.Model` or ndarray If input is of an array, it is the output of `coord_matrix`. 
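    Returns
    -------
    noutp : int
        The combined number of outputs of ``left`` and ``right``.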
""" if isinstance(left, Model): lnout = left.n_outputs else: lnout = left.shape[0] if isinstance(right, Model): rnout = right.n_outputs else: rnout = right.shape[0] noutp = lnout + rnout return noutp def _arith_oper(left, right): """ Function corresponding to one of the arithmetic operators ['+', '-'. '*', '/', '**']. This always returns a nonseparable output. Parameters ---------- left, right : `astropy.modeling.Model` or ndarray If input is of an array, it is the output of `coord_matrix`. Returns ------- result : ndarray Result from this operation. """ # models have the same number of inputs and outputs def _n_inputs_outputs(input): if isinstance(input, Model): n_outputs, n_inputs = input.n_outputs, input.n_inputs else: n_outputs, n_inputs = input.shape return n_inputs, n_outputs left_inputs, left_outputs = _n_inputs_outputs(left) right_inputs, right_outputs = _n_inputs_outputs(right) if left_inputs != right_inputs or left_outputs != right_outputs: raise ModelDefinitionError( f"Unsupported operands for arithmetic operator: left (n_inputs={left_inputs}, " f"n_outputs={left_outputs}) and right (n_inputs={right_inputs}, " f"n_outputs={right_outputs}); models must have the same n_inputs and the same " "n_outputs for this operator.") result = np.ones((left_outputs, left_inputs)) return result def _coord_matrix(model, pos, noutp): """ Create an array representing inputs and outputs of a simple model. The array has a shape (noutp, model.n_inputs). Parameters ---------- model : `astropy.modeling.Model` model pos : str Position of this model in the expression tree. One of ['left', 'right']. noutp : int Number of outputs of the compound model of which the input model is a left or right child. """ if isinstance(model, Mapping): axes = [] for i in model.mapping: axis = np.zeros((model.n_inputs,)) axis[i] = 1 axes.append(axis) m = np.vstack(axes) mat = np.zeros((noutp, model.n_inputs)) if pos == 'left': mat[: model.n_outputs, :model.n_inputs] = m else: mat[-model.n_outputs:, -model.n_inputs:] = m return mat if not model.separable: # this does not work for more than 2 coordinates mat = np.zeros((noutp, model.n_inputs)) if pos == 'left': mat[:model.n_outputs, : model.n_inputs] = 1 else: mat[-model.n_outputs:, -model.n_inputs:] = 1 else: mat = np.zeros((noutp, model.n_inputs)) for i in range(model.n_inputs): mat[i, i] = 1 if pos == 'right': mat = np.roll(mat, (noutp - model.n_outputs)) return mat def _cstack(left, right): """ Function corresponding to '&' operation. Parameters ---------- left, right : `astropy.modeling.Model` or ndarray If input is of an array, it is the output of `coord_matrix`. Returns ------- result : ndarray Result from this operation. """ noutp = _compute_n_outputs(left, right) if isinstance(left, Model): cleft = _coord_matrix(left, 'left', noutp) else: cleft = np.zeros((noutp, left.shape[1])) cleft[: left.shape[0], : left.shape[1]] = left if isinstance(right, Model): cright = _coord_matrix(right, 'right', noutp) else: cright = np.zeros((noutp, right.shape[1])) cright[-right.shape[0]:, -right.shape[1]:] = right return np.hstack([cleft, cright]) def _cdot(left, right): """ Function corresponding to "|" operation. Parameters ---------- left, right : `astropy.modeling.Model` or ndarray If input is of an array, it is the output of `coord_matrix`. Returns ------- result : ndarray Result from this operation. """ left, right = right, left def _n_inputs_outputs(input, position): """ Return ``n_inputs``, ``n_outputs`` for a model or coord_matrix. 
""" if isinstance(input, Model): coords = _coord_matrix(input, position, input.n_outputs) else: coords = input return coords cleft = _n_inputs_outputs(left, 'left') cright = _n_inputs_outputs(right, 'right') try: result = np.dot(cleft, cright) except ValueError: raise ModelDefinitionError( 'Models cannot be combined with the "|" operator; ' f'left coord_matrix is {cright}, right coord_matrix is {cleft}') return result def _separable(transform): """ Calculate the separability of outputs. Parameters ---------- transform : `astropy.modeling.Model` A transform (usually a compound model). Returns : is_separable : ndarray of dtype np.bool An array of shape (transform.n_outputs,) of boolean type Each element represents the separablity of the corresponding output. """ if (transform_matrix := transform._calculate_separability_matrix()) is not NotImplemented: return transform_matrix elif isinstance(transform, CompoundModel): sepleft = _separable(transform.left) sepright = _separable(transform.right) return _operators[transform.op](sepleft, sepright) elif isinstance(transform, Model): return _coord_matrix(transform, 'left', transform.n_outputs) # Maps modeling operators to a function computing and represents the # relationship of axes as an array of 0-es and 1-s _operators = {'&': _cstack, '|': _cdot, '+': _arith_oper, '-': _arith_oper, '*': _arith_oper, '/': _arith_oper, '**': _arith_oper}
fbe3ac45b28b7600c4a032383ef2eb5b172d7a0547e22775e71d15eefc413f4f
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module provides utility functions for the models package. """ import warnings # pylint: disable=invalid-name from collections import UserDict from collections.abc import MutableMapping from inspect import signature import numpy as np from astropy import units as u from astropy.utils.decorators import deprecated __doctest_skip__ = ['AliasDict'] __all__ = ['AliasDict', 'poly_map_domain', 'comb', 'ellipse_extent'] deprecation_msg = """ AliasDict is deprecated because it no longer serves a function anywhere inside astropy. """ @deprecated('5.0', deprecation_msg) class AliasDict(MutableMapping): """ Creates a `dict` like object that wraps an existing `dict` or other `MutableMapping`, along with a `dict` of *key aliases* that translate between specific keys in this dict to different keys in the underlying dict. In other words, keys that do not have an associated alias are accessed and stored like a normal `dict`. However, a key that has an alias is accessed and stored to the "parent" dict via the alias. Parameters ---------- parent : dict-like The parent `dict` that aliased keys and accessed from and stored to. aliases : dict-like Maps keys in this dict to their associated keys in the parent dict. Examples -------- >>> parent = {'a': 1, 'b': 2, 'c': 3} >>> aliases = {'foo': 'a', 'bar': 'c'} >>> alias_dict = AliasDict(parent, aliases) >>> alias_dict['foo'] 1 >>> alias_dict['bar'] 3 Keys in the original parent dict are not visible if they were not aliased: >>> alias_dict['b'] Traceback (most recent call last): ... KeyError: 'b' Likewise, updates to aliased keys are reflected back in the parent dict: >>> alias_dict['foo'] = 42 >>> alias_dict['foo'] 42 >>> parent['a'] 42 However, updates/insertions to keys that are *not* aliased are not reflected in the parent dict: >>> alias_dict['qux'] = 99 >>> alias_dict['qux'] 99 >>> 'qux' in parent False In particular, updates on the `AliasDict` to a key that is equal to one of the aliased keys in the parent dict does *not* update the parent dict. For example, ``alias_dict`` aliases ``'foo'`` to ``'a'``. But assigning to a key ``'a'`` on the `AliasDict` does not impact the parent: >>> alias_dict['a'] = 'nope' >>> alias_dict['a'] 'nope' >>> parent['a'] 42 """ _store_type = dict """ Subclasses may override this to use other mapping types as the underlying storage, for example an `OrderedDict`. However, even in this case additional work may be needed to get things like the ordering right. """ def __init__(self, parent, aliases): self._parent = parent self._store = self._store_type() self._aliases = dict(aliases) def __getitem__(self, key): if key in self._aliases: try: return self._parent[self._aliases[key]] except KeyError: raise KeyError(key) return self._store[key] def __setitem__(self, key, value): if key in self._aliases: self._parent[self._aliases[key]] = value else: self._store[key] = value def __delitem__(self, key): if key in self._aliases: try: del self._parent[self._aliases[key]] except KeyError: raise KeyError(key) else: del self._store[key] def __iter__(self): """ First iterates over keys from the parent dict (if the aliased keys are present in the parent), followed by any keys in the local store. 
""" for key, alias in self._aliases.items(): if alias in self._parent: yield key for key in self._store: yield key def __len__(self): return len(list(iter(self))) def __repr__(self): # repr() just like any other dict--this should look transparent store_copy = self._store_type() for key, alias in self._aliases.items(): if alias in self._parent: store_copy[key] = self._parent[alias] store_copy.update(self._store) return repr(store_copy) def make_binary_operator_eval(oper, f, g): """ Given a binary operator (as a callable of two arguments) ``oper`` and two callables ``f`` and ``g`` which accept the same arguments, returns a *new* function that takes the same arguments as ``f`` and ``g``, but passes the outputs of ``f`` and ``g`` in the given ``oper``. ``f`` and ``g`` are assumed to return tuples (which may be 1-tuples). The given operator is applied element-wise to tuple outputs). Example ------- >>> from operator import add >>> def prod(x, y): ... return (x * y,) ... >>> sum_of_prod = make_binary_operator_eval(add, prod, prod) >>> sum_of_prod(3, 5) (30,) """ return (lambda inputs, params: tuple(oper(x, y) for x, y in zip(f(inputs, params), g(inputs, params)))) def poly_map_domain(oldx, domain, window): """ Map domain into window by shifting and scaling. Parameters ---------- oldx : array original coordinates domain : list or tuple of length 2 function domain window : list or tuple of length 2 range into which to map the domain """ domain = np.array(domain, dtype=np.float64) window = np.array(window, dtype=np.float64) if domain.shape != (2,) or window.shape != (2,): raise ValueError('Expected "domain" and "window" to be a tuple of size 2.') scl = (window[1] - window[0]) / (domain[1] - domain[0]) off = (window[0] * domain[1] - window[1] * domain[0]) / (domain[1] - domain[0]) return off + scl * oldx def _validate_domain_window(value): if value is not None: if np.asanyarray(value).shape != (2, ): raise ValueError('domain and window should be tuples of size 2.') return tuple(value) return value def comb(N, k): """ The number of combinations of N things taken k at a time. Parameters ---------- N : int, array Number of things. k : int, array Number of elements taken. """ if (k > N) or (N < 0) or (k < 0): return 0 val = 1 for j in range(min(k, N - k)): val = (val * (N - j)) / (j + 1) return val def array_repr_oneline(array): """ Represents a multi-dimensional Numpy array flattened onto a single line. """ r = np.array2string(array, separator=', ', suppress_small=True) return ' '.join(line.strip() for line in r.splitlines()) def combine_labels(left, right): """ For use with the join operator &: Combine left input/output labels with right input/output labels. If none of the labels conflict then this just returns a sum of tuples. However if *any* of the labels conflict, this appends '0' to the left-hand labels and '1' to the right-hand labels so there is no ambiguity). """ if set(left).intersection(right): left = tuple(label + '0' for label in left) right = tuple(label + '1' for label in right) return left + right def ellipse_extent(a, b, theta): """ Calculates the half size of a box encapsulating a rotated 2D ellipse. Parameters ---------- a : float or `~astropy.units.Quantity` The ellipse semimajor axis. b : float or `~astropy.units.Quantity` The ellipse semiminor axis. theta : float or `~astropy.units.Quantity` ['angle'] The rotation angle as an angular quantity (`~astropy.units.Quantity` or `~astropy.coordinates.Angle`) or a value in radians (as a float). 
The rotation angle increases counterclockwise. Returns ------- offsets : tuple The absolute value of the offset distances from the ellipse center that define its bounding box region, ``(dx, dy)``. Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Ellipse2D from astropy.modeling.utils import ellipse_extent, render_model amplitude = 1 x0 = 50 y0 = 50 a = 30 b = 10 theta = np.pi / 4 model = Ellipse2D(amplitude, x0, y0, a, b, theta) dx, dy = ellipse_extent(a, b, theta) limits = [x0 - dx, x0 + dx, y0 - dy, y0 + dy] model.bounding_box = limits image = render_model(model) plt.imshow(image, cmap='binary', interpolation='nearest', alpha=.5, extent = limits) plt.show() """ from .parameters import Parameter # prevent circular import if isinstance(theta, Parameter): if theta.quantity is None: theta = theta.value else: theta = theta.quantity t = np.arctan2(-b * np.tan(theta), a) dx = a * np.cos(t) * np.cos(theta) - b * np.sin(t) * np.sin(theta) t = np.arctan2(b, a * np.tan(theta)) dy = b * np.sin(t) * np.cos(theta) + a * np.cos(t) * np.sin(theta) if isinstance(dx, u.Quantity) or isinstance(dy, u.Quantity): return np.abs(u.Quantity([dx, dy])) return np.abs([dx, dy]) def get_inputs_and_params(func): """ Given a callable, determine the input variables and the parameters. Parameters ---------- func : callable Returns ------- inputs, params : tuple Each entry is a list of inspect.Parameter objects """ sig = signature(func) inputs = [] params = [] for param in sig.parameters.values(): if param.kind in (param.VAR_POSITIONAL, param.VAR_KEYWORD): raise ValueError("Signature must not have *args or **kwargs") if param.default == param.empty: inputs.append(param) else: params.append(param) return inputs, params def _combine_equivalency_dict(keys, eq1=None, eq2=None): # Given two dictionaries that give equivalencies for a set of keys, for # example input value names, return a dictionary that includes all the # equivalencies eq = {} for key in keys: eq[key] = [] if eq1 is not None and key in eq1: eq[key].extend(eq1[key]) if eq2 is not None and key in eq2: eq[key].extend(eq2[key]) return eq def _to_radian(value): """ Convert ``value`` to radian. """ if isinstance(value, u.Quantity): return value.to(u.rad) return np.deg2rad(value) def _to_orig_unit(value, raw_unit=None, orig_unit=None): """ Convert value with ``raw_unit`` to ``orig_unit``. """ if raw_unit is not None: return (value * raw_unit).to(orig_unit) return np.rad2deg(value) class _ConstraintsDict(UserDict): """ Wrapper around UserDict to allow updating the constraints on a Parameter when the dictionary is updated. """ def __init__(self, model, constraint_type): self._model = model self.constraint_type = constraint_type c = {} for name in model.param_names: param = getattr(model, name) c[name] = getattr(param, constraint_type) super().__init__(c) def __setitem__(self, key, val): super().__setitem__(key, val) param = getattr(self._model, key) setattr(param, self.constraint_type, val) class _SpecialOperatorsDict(UserDict): """ Wrapper around UserDict to allow for better tracking of the Special Operators for CompoundModels. This dictionary is structured so that one cannot inadvertently overwrite an existing special operator. 
    Parameters
    ----------
    unique_id : int
        The last used unique_id for a special operator.
    special_operators : dict
        A dictionary containing the special operators.

    Notes
    -----
    Direct setting of operators (`dict[key] = value`) into the
    dictionary has been deprecated in favor of the `.add(name, value)`
    method, so that unique dictionary keys can be generated and tracked
    consistently.
    """

    def __init__(self, unique_id=0, special_operators=None):
        # Avoid a mutable default argument; fall back to an empty mapping.
        if special_operators is None:
            special_operators = {}
        super().__init__(special_operators)
        self._unique_id = unique_id

    def _set_value(self, key, val):
        if key in self:
            raise ValueError(f'Special operator "{key}" already exists')
        else:
            super().__setitem__(key, val)

    def __setitem__(self, key, val):
        self._set_value(key, val)
        warnings.warn(DeprecationWarning(
            """
            Special operator dictionary assignment has been deprecated.
            Please use `.add` instead, so that you can capture a unique
            key for your operator.
            """
        ))

    def _get_unique_id(self):
        self._unique_id += 1

        return self._unique_id

    def add(self, operator_name, operator):
        """
        Adds a special operator to the dictionary, and then returns the
        unique key that the operator is stored under for later reference.

        Parameters
        ----------
        operator_name : str
            The name for the operator.
        operator : function
            The actual operator function which will be used.

        Returns
        -------
        key : tuple
            The unique operator key for the dictionary:
            ``(operator_name, unique_id)``.
        """
        key = (operator_name, self._get_unique_id())
        self._set_value(key, operator)

        return key
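

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module API; the values are
# arbitrary): ``ellipse_extent`` and ``_SpecialOperatorsDict.add`` in action.
if __name__ == '__main__':  # pragma: no cover
    # Half-widths of the bounding box of a 3 x 1 ellipse rotated by 45 deg;
    # analytically both equal sqrt((a**2 + b**2) / 2) = sqrt(5) here.
    dx, dy = ellipse_extent(3.0, 1.0, np.pi / 4)
    assert np.allclose([dx, dy], np.sqrt(5.0))

    # ``add`` returns the generated key under which the operator is stored.
    ops = _SpecialOperatorsDict()
    key = ops.add('fix_inputs', lambda left, right: left)
    assert key == ('fix_inputs', 1)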
ecdce5bca110cd073d804e34c412f90fc85b05788c4260d425e4fd4d4d09ba9c
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module is to contain an improved bounding box """ import abc import copy import warnings from collections import namedtuple from typing import Any, Callable, Dict, List, Tuple import numpy as np from astropy.units import Quantity from astropy.utils import isiterable __all__ = ['ModelBoundingBox', 'CompoundBoundingBox'] _BaseInterval = namedtuple('_BaseInterval', "lower upper") class _Interval(_BaseInterval): """ A single input's bounding box interval. Parameters ---------- lower : float The lower bound of the interval upper : float The upper bound of the interval Methods ------- validate : Contructs a valid interval outside : Determine which parts of an input array are outside the interval. domain : Contructs a discretization of the points inside the interval. """ def __repr__(self): return f"Interval(lower={self.lower}, upper={self.upper})" def copy(self): return copy.deepcopy(self) @staticmethod def _validate_shape(interval): """Validate the shape of an interval representation""" MESSAGE = """An interval must be some sort of sequence of length 2""" try: shape = np.shape(interval) except TypeError: try: # np.shape does not work with lists of Quantities if len(interval) == 1: interval = interval[0] shape = np.shape([b.to_value() for b in interval]) except (ValueError, TypeError, AttributeError): raise ValueError(MESSAGE) valid_shape = shape in ((2,), (1, 2), (2, 0)) if not valid_shape: valid_shape = (len(shape) > 0 and shape[0] == 2 and all(isinstance(b, np.ndarray) for b in interval)) if not isiterable(interval) or not valid_shape: raise ValueError(MESSAGE) @classmethod def _validate_bounds(cls, lower, upper): """Validate the bounds are reasonable and construct an interval from them.""" if (np.asanyarray(lower) > np.asanyarray(upper)).all(): warnings.warn(f"Invalid interval: upper bound {upper} " f"is strictly less than lower bound {lower}.", RuntimeWarning) return cls(lower, upper) @classmethod def validate(cls, interval): """ Construct and validate an interval Parameters ---------- interval : iterable A representation of the interval. Returns ------- A validated interval. """ cls._validate_shape(interval) if len(interval) == 1: interval = tuple(interval[0]) else: interval = tuple(interval) return cls._validate_bounds(interval[0], interval[1]) def outside(self, _input: np.ndarray): """ Parameters ---------- _input : np.ndarray The evaluation input in the form of an array. Returns ------- Boolean array indicating which parts of _input are outside the interval: True -> position outside interval False -> position inside interval """ return np.logical_or(_input < self.lower, _input > self.upper) def domain(self, resolution): return np.arange(self.lower, self.upper + resolution, resolution) # The interval where all ignored inputs can be found. _ignored_interval = _Interval.validate((-np.inf, np.inf)) def get_index(model, key) -> int: """ Get the input index corresponding to the given key. Can pass in either: the string name of the input or the input index itself. 
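    Returns
    -------
    index : int
        The positional index of the input in ``model.inputs``.

    Examples
    --------
    An illustrative lookup (any 2-input model works the same way):

    >>> from astropy.modeling.models import Gaussian2D
    >>> get_index(Gaussian2D(), 'y')
    1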
""" if isinstance(key, str): if key in model.inputs: index = model.inputs.index(key) else: raise ValueError(f"'{key}' is not one of the inputs: {model.inputs}.") elif np.issubdtype(type(key), np.integer): if 0 <= key < len(model.inputs): index = key else: raise IndexError(f"Integer key: {key} must be non-negative and < {len(model.inputs)}.") else: raise ValueError(f"Key value: {key} must be string or integer.") return index def get_name(model, index: int): """Get the input name corresponding to the input index""" return model.inputs[index] class _BoundingDomain(abc.ABC): """ Base class for ModelBoundingBox and CompoundBoundingBox. This is where all the `~astropy.modeling.core.Model` evaluation code for evaluating with a bounding box is because it is common to both types of bounding box. Parameters ---------- model : `~astropy.modeling.Model` The Model this bounding domain is for. prepare_inputs : Generates the necessary input information so that model can be evaluated only for input points entirely inside bounding_box. This needs to be implemented by a subclass. Note that most of the implementation is in ModelBoundingBox. prepare_outputs : Fills the output values in for any input points outside the bounding_box. evaluate : Performs a complete model evaluation while enforcing the bounds on the inputs and returns a complete output. """ def __init__(self, model, ignored: List[int] = None, order: str = 'C'): self._model = model self._ignored = self._validate_ignored(ignored) self._order = self._get_order(order) @property def model(self): return self._model @property def order(self) -> str: return self._order @property def ignored(self) -> List[int]: return self._ignored def _get_order(self, order: str = None) -> str: """ Get if bounding_box is C/python ordered or Fortran/mathematically ordered """ if order is None: order = self._order if order not in ('C', 'F'): raise ValueError("order must be either 'C' (C/python order) or " f"'F' (Fortran/mathematical order), got: {order}.") return order def _get_index(self, key) -> int: """ Get the input index corresponding to the given key. Can pass in either: the string name of the input or the input index itself. """ return get_index(self._model, key) def _get_name(self, index: int): """Get the input name corresponding to the input index""" return get_name(self._model, index) @property def ignored_inputs(self) -> List[str]: return [self._get_name(index) for index in self._ignored] def _validate_ignored(self, ignored: list) -> List[int]: if ignored is None: return [] else: return [self._get_index(key) for key in ignored] def __call__(self, *args, **kwargs): raise NotImplementedError( "This bounding box is fixed by the model and does not have " "adjustable parameters.") @abc.abstractmethod def fix_inputs(self, model, fixed_inputs: dict): """ Fix the bounding_box for a `fix_inputs` compound model. Parameters ---------- model : `~astropy.modeling.Model` The new model for which this will be a bounding_box fixed_inputs : dict Dictionary of inputs which have been fixed by this bounding box. """ raise NotImplementedError("This should be implemented by a child class.") @abc.abstractmethod def prepare_inputs(self, input_shape, inputs) -> Tuple[Any, Any, Any]: """ Get prepare the inputs with respect to the bounding box. 
Parameters ---------- input_shape : tuple The shape that all inputs have be reshaped/broadcasted into inputs : list List of all the model inputs Returns ------- valid_inputs : list The inputs reduced to just those inputs which are all inside their respective bounding box intervals valid_index : array_like array of all indices inside the bounding box all_out: bool if all of the inputs are outside the bounding_box """ raise NotImplementedError("This has not been implemented for BoundingDomain.") @staticmethod def _base_output(input_shape, fill_value): """ Create a baseline output, assuming that the entire input is outside the bounding box Parameters ---------- input_shape : tuple The shape that all inputs have be reshaped/broadcasted into fill_value : float The value which will be assigned to inputs which are outside the bounding box Returns ------- An array of the correct shape containing all fill_value """ return np.zeros(input_shape) + fill_value def _all_out_output(self, input_shape, fill_value): """ Create output if all inputs are outside the domain Parameters ---------- input_shape : tuple The shape that all inputs have be reshaped/broadcasted into fill_value : float The value which will be assigned to inputs which are outside the bounding box Returns ------- A full set of outputs for case that all inputs are outside domain. """ return [self._base_output(input_shape, fill_value) for _ in range(self._model.n_outputs)], None def _modify_output(self, valid_output, valid_index, input_shape, fill_value): """ For a single output fill in all the parts corresponding to inputs outside the bounding box. Parameters ---------- valid_output : numpy array The output from the model corresponding to inputs inside the bounding box valid_index : numpy array array of all indices of inputs inside the bounding box input_shape : tuple The shape that all inputs have be reshaped/broadcasted into fill_value : float The value which will be assigned to inputs which are outside the bounding box Returns ------- An output array with all the indices corresponding to inputs outside the bounding box filled in by fill_value """ output = self._base_output(input_shape, fill_value) if not output.shape: output = np.array(valid_output) else: output[valid_index] = valid_output if np.isscalar(valid_output): output = output.item(0) return output def _prepare_outputs(self, valid_outputs, valid_index, input_shape, fill_value): """ Fill in all the outputs of the model corresponding to inputs outside the bounding_box. Parameters ---------- valid_outputs : list of numpy array The list of outputs from the model corresponding to inputs inside the bounding box valid_index : numpy array array of all indices of inputs inside the bounding box input_shape : tuple The shape that all inputs have be reshaped/broadcasted into fill_value : float The value which will be assigned to inputs which are outside the bounding box Returns ------- List of filled in output arrays. """ outputs = [] for valid_output in valid_outputs: outputs.append(self._modify_output(valid_output, valid_index, input_shape, fill_value)) return outputs def prepare_outputs(self, valid_outputs, valid_index, input_shape, fill_value): """ Fill in all the outputs of the model corresponding to inputs outside the bounding_box, adjusting any single output model so that its output becomes a list of containing that output. 
Parameters ---------- valid_outputs : list The list of outputs from the model corresponding to inputs inside the bounding box valid_index : array_like array of all indices of inputs inside the bounding box input_shape : tuple The shape that all inputs have be reshaped/broadcasted into fill_value : float The value which will be assigned to inputs which are outside the bounding box """ if self._model.n_outputs == 1: valid_outputs = [valid_outputs] return self._prepare_outputs(valid_outputs, valid_index, input_shape, fill_value) @staticmethod def _get_valid_outputs_unit(valid_outputs, with_units: bool): """ Get the unit for outputs if one is required. Parameters ---------- valid_outputs : list of numpy array The list of outputs from the model corresponding to inputs inside the bounding box with_units : bool whether or not a unit is required """ if with_units: return getattr(valid_outputs, 'unit', None) def _evaluate_model(self, evaluate: Callable, valid_inputs, valid_index, input_shape, fill_value, with_units: bool): """ Evaluate the model using the given evaluate routine Parameters ---------- evaluate : Callable callable which takes in the valid inputs to evaluate model valid_inputs : list of numpy arrays The inputs reduced to just those inputs which are all inside their respective bounding box intervals valid_index : numpy array array of all indices inside the bounding box input_shape : tuple The shape that all inputs have be reshaped/broadcasted into fill_value : float The value which will be assigned to inputs which are outside the bounding box with_units : bool whether or not a unit is required Returns ------- outputs : list containing filled in output values valid_outputs_unit : the unit that will be attached to the outputs """ valid_outputs = evaluate(valid_inputs) valid_outputs_unit = self._get_valid_outputs_unit(valid_outputs, with_units) return self.prepare_outputs(valid_outputs, valid_index, input_shape, fill_value), valid_outputs_unit def _evaluate(self, evaluate: Callable, inputs, input_shape, fill_value, with_units: bool): """ Perform model evaluation steps: prepare_inputs -> evaluate -> prepare_outputs Parameters ---------- evaluate : Callable callable which takes in the valid inputs to evaluate model valid_inputs : list of numpy arrays The inputs reduced to just those inputs which are all inside their respective bounding box intervals valid_index : numpy array array of all indices inside the bounding box input_shape : tuple The shape that all inputs have be reshaped/broadcasted into fill_value : float The value which will be assigned to inputs which are outside the bounding box with_units : bool whether or not a unit is required Returns ------- outputs : list containing filled in output values valid_outputs_unit : the unit that will be attached to the outputs """ valid_inputs, valid_index, all_out = self.prepare_inputs(input_shape, inputs) if all_out: return self._all_out_output(input_shape, fill_value) else: return self._evaluate_model(evaluate, valid_inputs, valid_index, input_shape, fill_value, with_units) @staticmethod def _set_outputs_unit(outputs, valid_outputs_unit): """ Set the units on the outputs prepare_inputs -> evaluate -> prepare_outputs -> set output units Parameters ---------- outputs : list containing filled in output values valid_outputs_unit : the unit that will be attached to the outputs Returns ------- List containing filled in output values and units """ if valid_outputs_unit is not None: return Quantity(outputs, valid_outputs_unit, copy=False) return 
outputs def evaluate(self, evaluate: Callable, inputs, fill_value): """ Perform full model evaluation steps: prepare_inputs -> evaluate -> prepare_outputs -> set output units Parameters ---------- evaluate : callable callable which takes in the valid inputs to evaluate model valid_inputs : list The inputs reduced to just those inputs which are all inside their respective bounding box intervals valid_index : array_like array of all indices inside the bounding box fill_value : float The value which will be assigned to inputs which are outside the bounding box """ input_shape = self._model.input_shape(inputs) # NOTE: CompoundModel does not currently support units during # evaluation for bounding_box so this feature is turned off # for CompoundModel(s). outputs, valid_outputs_unit = self._evaluate(evaluate, inputs, input_shape, fill_value, self._model.bbox_with_units) return tuple(self._set_outputs_unit(outputs, valid_outputs_unit)) class ModelBoundingBox(_BoundingDomain): """ A model's bounding box Parameters ---------- intervals : dict A dictionary containing all the intervals for each model input keys -> input index values -> interval for that index model : `~astropy.modeling.Model` The Model this bounding_box is for. ignored : list A list containing all the inputs (index) which will not be checked for whether or not their elements are in/out of an interval. order : optional, str The ordering that is assumed for the tuple representation of this bounding_box. Options: 'C': C/Python order, e.g. z, y, x. (default), 'F': Fortran/mathematical notation order, e.g. x, y, z. """ def __init__(self, intervals: Dict[int, _Interval], model, ignored: List[int] = None, order: str = 'C'): super().__init__(model, ignored, order) self._intervals = {} if intervals != () and intervals != {}: self._validate(intervals, order=order) def copy(self, ignored=None): intervals = {index: interval.copy() for index, interval in self._intervals.items()} if ignored is None: ignored = self._ignored.copy() return ModelBoundingBox(intervals, self._model, ignored=ignored, order=self._order) @property def intervals(self) -> Dict[int, _Interval]: """Return bounding_box labeled using input positions""" return self._intervals @property def named_intervals(self) -> Dict[str, _Interval]: """Return bounding_box labeled using input names""" return {self._get_name(index): bbox for index, bbox in self._intervals.items()} def __repr__(self): parts = [ 'ModelBoundingBox(', ' intervals={' ] for name, interval in self.named_intervals.items(): parts.append(f" {name}: {interval}") parts.append(' }') if len(self._ignored) > 0: parts.append(f" ignored={self.ignored_inputs}") parts.append(f' model={self._model.__class__.__name__}(inputs={self._model.inputs})') parts.append(f" order='{self._order}'") parts.append(')') return '\n'.join(parts) def __len__(self): return len(self._intervals) def __contains__(self, key): try: return self._get_index(key) in self._intervals or self._ignored except (IndexError, ValueError): return False def has_interval(self, key): return self._get_index(key) in self._intervals def __getitem__(self, key): """Get bounding_box entries by either input name or input index""" index = self._get_index(key) if index in self._ignored: return _ignored_interval else: return self._intervals[self._get_index(key)] def bounding_box(self, order: str = None): """ Return the old tuple of tuples representation of the bounding_box order='C' corresponds to the old bounding_box ordering order='F' corresponds to the gwcs bounding_box 
ordering. """ if len(self._intervals) == 1: return tuple(list(self._intervals.values())[0]) else: order = self._get_order(order) inputs = self._model.inputs if order == 'C': inputs = inputs[::-1] bbox = tuple([tuple(self[input_name]) for input_name in inputs]) if len(bbox) == 1: bbox = bbox[0] return bbox def __eq__(self, value): """Note equality can be either with old representation or new one.""" if isinstance(value, tuple): return self.bounding_box() == value elif isinstance(value, ModelBoundingBox): return (self.intervals == value.intervals) and (self.ignored == value.ignored) else: return False def __setitem__(self, key, value): """Validate and store interval under key (input index or input name).""" index = self._get_index(key) if index in self._ignored: self._ignored.remove(index) self._intervals[index] = _Interval.validate(value) def __delitem__(self, key): """Delete stored interval""" index = self._get_index(key) if index in self._ignored: raise RuntimeError(f"Cannot delete ignored input: {key}!") del self._intervals[index] self._ignored.append(index) def _validate_dict(self, bounding_box: dict): """Validate passing dictionary of intervals and setting them.""" for key, value in bounding_box.items(): self[key] = value @property def _available_input_index(self): model_input_index = [self._get_index(_input) for _input in self._model.inputs] return [_input for _input in model_input_index if _input not in self._ignored] def _validate_sequence(self, bounding_box, order: str = None): """Validate passing tuple of tuples representation (or related) and setting them.""" order = self._get_order(order) if order == 'C': # If bounding_box is C/python ordered, it needs to be reversed # to be in Fortran/mathematical/input order. bounding_box = bounding_box[::-1] for index, value in enumerate(bounding_box): self[self._available_input_index[index]] = value @property def _n_inputs(self) -> int: n_inputs = self._model.n_inputs - len(self._ignored) if n_inputs > 0: return n_inputs else: return 0 def _validate_iterable(self, bounding_box, order: str = None): """Validate and set any iterable representation""" if len(bounding_box) != self._n_inputs: raise ValueError(f"Found {len(bounding_box)} intervals, " f"but must have exactly {self._n_inputs}.") if isinstance(bounding_box, dict): self._validate_dict(bounding_box) else: self._validate_sequence(bounding_box, order) def _validate(self, bounding_box, order: str = None): """Validate and set any representation""" if self._n_inputs == 1 and not isinstance(bounding_box, dict): self[self._available_input_index[0]] = bounding_box else: self._validate_iterable(bounding_box, order) @classmethod def validate(cls, model, bounding_box, ignored: list = None, order: str = 'C', _preserve_ignore: bool = False, **kwargs): """ Construct a valid bounding box for a model. Parameters ---------- model : `~astropy.modeling.Model` The model for which this will be a bounding_box bounding_box : dict, tuple A possible representation of the bounding box order : optional, str The order that a tuple representation will be assumed to be Default: 'C' """ if isinstance(bounding_box, ModelBoundingBox): order = bounding_box.order if _preserve_ignore: ignored = bounding_box.ignored bounding_box = bounding_box.named_intervals new = cls({}, model, ignored=ignored, order=order) new._validate(bounding_box) return new def fix_inputs(self, model, fixed_inputs: dict, _keep_ignored=False): """ Fix the bounding_box for a `fix_inputs` compound model. 
Parameters ---------- model : `~astropy.modeling.Model` The new model for which this will be a bounding_box fixed_inputs : dict Dictionary of inputs which have been fixed by this bounding box. keep_ignored : bool Keep the ignored inputs of the bounding box (internal argument only) """ new = self.copy() for _input in fixed_inputs.keys(): del new[_input] if _keep_ignored: ignored = new.ignored else: ignored = None return ModelBoundingBox.validate(model, new.named_intervals, ignored=ignored, order=new._order) @property def dimension(self): return len(self) def domain(self, resolution, order: str = None): inputs = self._model.inputs order = self._get_order(order) if order == 'C': inputs = inputs[::-1] return [self[input_name].domain(resolution) for input_name in inputs] def _outside(self, input_shape, inputs): """ Get all the input positions which are outside the bounding_box, so that the corresponding outputs can be filled with the fill value (default NaN). Parameters ---------- input_shape : tuple The shape that all inputs have be reshaped/broadcasted into inputs : list List of all the model inputs Returns ------- outside_index : bool-numpy array True -> position outside bounding_box False -> position inside bounding_box all_out : bool if all of the inputs are outside the bounding_box """ all_out = False outside_index = np.zeros(input_shape, dtype=bool) for index, _input in enumerate(inputs): _input = np.asanyarray(_input) outside = np.broadcast_to(self[index].outside(_input), input_shape) outside_index[outside] = True if outside_index.all(): all_out = True break return outside_index, all_out def _valid_index(self, input_shape, inputs): """ Get the indices of all the inputs inside the bounding_box. Parameters ---------- input_shape : tuple The shape that all inputs have be reshaped/broadcasted into inputs : list List of all the model inputs Returns ------- valid_index : numpy array array of all indices inside the bounding box all_out : bool if all of the inputs are outside the bounding_box """ outside_index, all_out = self._outside(input_shape, inputs) valid_index = np.atleast_1d(np.logical_not(outside_index)).nonzero() if len(valid_index[0]) == 0: all_out = True return valid_index, all_out def prepare_inputs(self, input_shape, inputs) -> Tuple[Any, Any, Any]: """ Get prepare the inputs with respect to the bounding box. Parameters ---------- input_shape : tuple The shape that all inputs have be reshaped/broadcasted into inputs : list List of all the model inputs Returns ------- valid_inputs : list The inputs reduced to just those inputs which are all inside their respective bounding box intervals valid_index : array_like array of all indices inside the bounding box all_out: bool if all of the inputs are outside the bounding_box """ valid_index, all_out = self._valid_index(input_shape, inputs) valid_inputs = [] if not all_out: for _input in inputs: if input_shape: valid_input = np.broadcast_to(np.atleast_1d(_input), input_shape)[valid_index] if np.isscalar(_input): valid_input = valid_input.item(0) valid_inputs.append(valid_input) else: valid_inputs.append(_input) return tuple(valid_inputs), valid_index, all_out _BaseSelectorArgument = namedtuple('_BaseSelectorArgument', "index ignore") class _SelectorArgument(_BaseSelectorArgument): """ Contains a single CompoundBoundingBox slicing input. Parameters ---------- index : int The index of the input in the input list ignore : bool Whether or not this input will be ignored by the bounding box. 
Methods ------- validate : Returns a valid SelectorArgument for a given model. get_selector : Returns the value of the input for use in finding the correct bounding_box. get_fixed_value : Gets the slicing value from a fix_inputs set of values. """ def __new__(cls, index, ignore): self = super().__new__(cls, index, ignore) return self @classmethod def validate(cls, model, argument, ignored: bool = True): """ Construct a valid selector argument for a CompoundBoundingBox. Parameters ---------- model : `~astropy.modeling.Model` The model for which this will be an argument for. argument : int or str A representation of which evaluation input to use ignored : optional, bool Whether or not to ignore this argument in the ModelBoundingBox. Returns ------- Validated selector_argument """ return cls(get_index(model, argument), ignored) def get_selector(self, *inputs): """ Get the selector value corresponding to this argument Parameters ---------- *inputs : All the processed model evaluation inputs. """ _selector = inputs[self.index] if isiterable(_selector): if len(_selector) == 1: return _selector[0] else: return tuple(_selector) return _selector def name(self, model) -> str: """ Get the name of the input described by this selector argument Parameters ---------- model : `~astropy.modeling.Model` The Model this selector argument is for. """ return get_name(model, self.index) def pretty_repr(self, model): """ Get a pretty-print representation of this object Parameters ---------- model : `~astropy.modeling.Model` The Model this selector argument is for. """ return f"Argument(name='{self.name(model)}', ignore={self.ignore})" def get_fixed_value(self, model, values: dict): """ Gets the value fixed input corresponding to this argument Parameters ---------- model : `~astropy.modeling.Model` The Model this selector argument is for. values : dict Dictionary of fixed inputs. """ if self.index in values: return values[self.index] else: if self.name(model) in values: return values[self.name(model)] else: raise RuntimeError(f"{self.pretty_repr(model)} was not found in {values}") def is_argument(self, model, argument) -> bool: """ Determine if passed argument is described by this selector argument Parameters ---------- model : `~astropy.modeling.Model` The Model this selector argument is for. argument : int or str A representation of which evaluation input is being used """ return self.index == get_index(model, argument) def named_tuple(self, model): """ Get a tuple representation of this argument using the input name from the model. Parameters ---------- model : `~astropy.modeling.Model` The Model this selector argument is for. """ return (self.name(model), self.ignore) class _SelectorArguments(tuple): """ Contains the CompoundBoundingBox slicing description Parameters ---------- input_ : The SelectorArgument values Methods ------- validate : Returns a valid SelectorArguments for its model. get_selector : Returns the selector a set of inputs corresponds to. is_selector : Determines if a selector is correctly formatted for this CompoundBoundingBox. get_fixed_value : Gets the selector from a fix_inputs set of values. 
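    Examples
    --------
    An illustrative construction (the model and arguments are arbitrary):

    >>> from astropy.modeling.models import Gaussian2D
    >>> args = _SelectorArguments.validate(Gaussian2D(), [('x', True)])
    >>> args.ignore
    [0]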
""" _kept_ignore = None def __new__(cls, input_: Tuple[_SelectorArgument], kept_ignore: List = None): self = super().__new__(cls, input_) if kept_ignore is None: self._kept_ignore = [] else: self._kept_ignore = kept_ignore return self def pretty_repr(self, model): """ Get a pretty-print representation of this object Parameters ---------- model : `~astropy.modeling.Model` The Model these selector arguments are for. """ parts = ['SelectorArguments('] for argument in self: parts.append( f" {argument.pretty_repr(model)}" ) parts.append(')') return '\n'.join(parts) @property def ignore(self): """Get the list of ignored inputs""" ignore = [argument.index for argument in self if argument.ignore] ignore.extend(self._kept_ignore) return ignore @property def kept_ignore(self): """The arguments to persist in ignoring""" return self._kept_ignore @classmethod def validate(cls, model, arguments, kept_ignore: List = None): """ Construct a valid Selector description for a CompoundBoundingBox. Parameters ---------- model : `~astropy.modeling.Model` The Model these selector arguments are for. arguments : The individual argument informations kept_ignore : Arguments to persist as ignored """ inputs = [] for argument in arguments: _input = _SelectorArgument.validate(model, *argument) if _input.index in [this.index for this in inputs]: raise ValueError(f"Input: '{get_name(model, _input.index)}' has been repeated.") inputs.append(_input) if len(inputs) == 0: raise ValueError("There must be at least one selector argument.") if isinstance(arguments, _SelectorArguments): if kept_ignore is None: kept_ignore = [] kept_ignore.extend(arguments.kept_ignore) return cls(tuple(inputs), kept_ignore) def get_selector(self, *inputs): """ Get the selector corresponding to these inputs Parameters ---------- *inputs : All the processed model evaluation inputs. """ return tuple([argument.get_selector(*inputs) for argument in self]) def is_selector(self, _selector): """ Determine if this is a reasonable selector Parameters ---------- _selector : tuple The selector to check """ return isinstance(_selector, tuple) and len(_selector) == len(self) def get_fixed_values(self, model, values: dict): """ Gets the value fixed input corresponding to this argument Parameters ---------- model : `~astropy.modeling.Model` The Model these selector arguments are for. values : dict Dictionary of fixed inputs. """ return tuple([argument.get_fixed_value(model, values) for argument in self]) def is_argument(self, model, argument) -> bool: """ Determine if passed argument is one of the selector arguments Parameters ---------- model : `~astropy.modeling.Model` The Model these selector arguments are for. argument : int or str A representation of which evaluation input is being used """ for selector_arg in self: if selector_arg.is_argument(model, argument): return True else: return False def selector_index(self, model, argument): """ Get the index of the argument passed in the selector tuples Parameters ---------- model : `~astropy.modeling.Model` The Model these selector arguments are for. argument : int or str A representation of which argument is being used """ for index, selector_arg in enumerate(self): if selector_arg.is_argument(model, argument): return index else: raise ValueError(f"{argument} does not correspond to any selector argument.") def reduce(self, model, argument): """ Reduce the selector arguments by the argument given Parameters ---------- model : `~astropy.modeling.Model` The Model these selector arguments are for. 
argument : int or str A representation of which argument is being used """ arguments = list(self) kept_ignore = [arguments.pop(self.selector_index(model, argument)).index] kept_ignore.extend(self._kept_ignore) return _SelectorArguments.validate(model, tuple(arguments), kept_ignore) def add_ignore(self, model, argument): """ Add argument to the kept_ignore list Parameters ---------- model : `~astropy.modeling.Model` The Model these selector arguments are for. argument : int or str A representation of which argument is being used """ if self.is_argument(model, argument): raise ValueError(f"{argument}: is a selector argument and cannot be ignored.") kept_ignore = [get_index(model, argument)] return _SelectorArguments.validate(model, self, kept_ignore) def named_tuple(self, model): """ Get a tuple of selector argument tuples using input names Parameters ---------- model : `~astropy.modeling.Model` The Model these selector arguments are for. """ return tuple([selector_arg.named_tuple(model) for selector_arg in self]) class CompoundBoundingBox(_BoundingDomain): """ A model's compound bounding box Parameters ---------- bounding_boxes : dict A dictionary containing all the ModelBoundingBoxes that are possible keys -> _selector (extracted from model inputs) values -> ModelBoundingBox model : `~astropy.modeling.Model` The Model this compound bounding_box is for. selector_args : _SelectorArguments A description of how to extract the selectors from model inputs. create_selector : optional A method which takes in the selector and the model to return a valid bounding box corresponding to that selector. This can be used to construct new bounding_boxes for previously undefined selectors. These new boxes are then stored for future lookups. order : optional, str The ordering that is assumed for the tuple representation of the bounding_boxes.
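Examples
--------
A minimal sketch (the selector values ``1`` and ``2`` are arbitrary
assumptions chosen only for illustration):

>>> from astropy.modeling import models  # doctest: +SKIP
>>> from astropy.modeling.bounding_box import CompoundBoundingBox  # doctest: +SKIP
>>> model = models.Gaussian2D()  # doctest: +SKIP
>>> cbbox = CompoundBoundingBox.validate(
...     model, {(1,): (-1, 1), (2,): (-2, 2)},
...     selector_args=[('x', True)])  # doctest: +SKIP
>>> (1,) in cbbox  # doctest: +SKIP
True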
""" def __init__(self, bounding_boxes: Dict[Any, ModelBoundingBox], model, selector_args: _SelectorArguments, create_selector: Callable = None, ignored: List[int] = None, order: str = 'C'): super().__init__(model, ignored, order) self._create_selector = create_selector self._selector_args = _SelectorArguments.validate(model, selector_args) self._bounding_boxes = {} self._validate(bounding_boxes) def copy(self): bounding_boxes = {selector: bbox.copy(self.selector_args.ignore) for selector, bbox in self._bounding_boxes.items()} return CompoundBoundingBox(bounding_boxes, self._model, selector_args=self._selector_args, create_selector=copy.deepcopy(self._create_selector), order=self._order) def __repr__(self): parts = ['CompoundBoundingBox(', ' bounding_boxes={'] # bounding_boxes for _selector, bbox in self._bounding_boxes.items(): bbox_repr = bbox.__repr__().split('\n') parts.append(f" {_selector} = {bbox_repr.pop(0)}") for part in bbox_repr: parts.append(f" {part}") parts.append(' }') # selector_args selector_args_repr = self.selector_args.pretty_repr(self._model).split('\n') parts.append(f" selector_args = {selector_args_repr.pop(0)}") for part in selector_args_repr: parts.append(f" {part}") parts.append(')') return '\n'.join(parts) @property def bounding_boxes(self) -> Dict[Any, ModelBoundingBox]: return self._bounding_boxes @property def selector_args(self) -> _SelectorArguments: return self._selector_args @selector_args.setter def selector_args(self, value): self._selector_args = _SelectorArguments.validate(self._model, value) warnings.warn("Overriding selector_args may cause problems you should re-validate " "the compound bounding box before use!", RuntimeWarning) @property def named_selector_tuple(self) -> tuple: return self._selector_args.named_tuple(self._model) @property def create_selector(self): return self._create_selector @staticmethod def _get_selector_key(key): if isiterable(key): return tuple(key) else: return (key,) def __setitem__(self, key, value): _selector = self._get_selector_key(key) if not self.selector_args.is_selector(_selector): raise ValueError(f"{_selector} is not a selector!") ignored = self.selector_args.ignore + self.ignored self._bounding_boxes[_selector] = ModelBoundingBox.validate(self._model, value, ignored, order=self._order) def _validate(self, bounding_boxes: dict): for _selector, bounding_box in bounding_boxes.items(): self[_selector] = bounding_box def __eq__(self, value): if isinstance(value, CompoundBoundingBox): return (self.bounding_boxes == value.bounding_boxes and self.selector_args == value.selector_args and self.create_selector == value.create_selector) else: return False @classmethod def validate(cls, model, bounding_box: dict, selector_args=None, create_selector=None, ignored: list = None, order: str = 'C', _preserve_ignore: bool = False, **kwarg): """ Construct a valid compound bounding box for a model. 
Parameters ---------- model : `~astropy.modeling.Model` The model for which this will be a bounding_box bounding_box : dict Dictionary of possible bounding_box representations selector_args : optional Description of the selector arguments create_selector : optional, callable Method for generating new selectors order : optional, str The order that a tuple representation will be assumed to be Default: 'C' """ if isinstance(bounding_box, CompoundBoundingBox): if selector_args is None: selector_args = bounding_box.selector_args if create_selector is None: create_selector = bounding_box.create_selector order = bounding_box.order if _preserve_ignore: ignored = bounding_box.ignored bounding_box = bounding_box.bounding_boxes if selector_args is None: raise ValueError("Selector arguments must be provided " "(can be passed as part of bounding_box argument)") return cls(bounding_box, model, selector_args, create_selector=create_selector, ignored=ignored, order=order) def __contains__(self, key): return key in self._bounding_boxes def _create_bounding_box(self, _selector): self[_selector] = self._create_selector(_selector, model=self._model) return self[_selector] def __getitem__(self, key): _selector = self._get_selector_key(key) if _selector in self: return self._bounding_boxes[_selector] elif self._create_selector is not None: return self._create_bounding_box(_selector) else: raise RuntimeError(f"No bounding box is defined for selector: {_selector}.") def _select_bounding_box(self, inputs) -> ModelBoundingBox: _selector = self.selector_args.get_selector(*inputs) return self[_selector] def prepare_inputs(self, input_shape, inputs) -> Tuple[Any, Any, Any]: """ Prepare the inputs with respect to the bounding box. Parameters ---------- input_shape : tuple The shape that all inputs have been reshaped/broadcast into inputs : list List of all the model inputs Returns ------- valid_inputs : list The inputs reduced to just those inputs which are all inside their respective bounding box intervals valid_index : array_like array of all indices inside the bounding box all_out : bool if all of the inputs are outside the bounding_box """ bounding_box = self._select_bounding_box(inputs) return bounding_box.prepare_inputs(input_shape, inputs) def _matching_bounding_boxes(self, argument, value) -> Dict[Any, ModelBoundingBox]: selector_index = self.selector_args.selector_index(self._model, argument) matching = {} for selector_key, bbox in self._bounding_boxes.items(): if selector_key[selector_index] == value: new_selector_key = list(selector_key) new_selector_key.pop(selector_index) if bbox.has_interval(argument): new_bbox = bbox.fix_inputs(self._model, {argument: value}, _keep_ignored=True) else: new_bbox = bbox.copy() matching[tuple(new_selector_key)] = new_bbox if len(matching) == 0: raise ValueError(f"Attempting to fix input {argument}, but there are no " f"bounding boxes for argument value {value}.") return matching def _fix_input_selector_arg(self, argument, value): matching_bounding_boxes = self._matching_bounding_boxes(argument, value) if len(self.selector_args) == 1: return matching_bounding_boxes[()] else: return CompoundBoundingBox(matching_bounding_boxes, self._model, self.selector_args.reduce(self._model, argument)) def _fix_input_bbox_arg(self, argument, value): bounding_boxes = {} for selector_key, bbox in self._bounding_boxes.items(): bounding_boxes[selector_key] = bbox.fix_inputs(self._model, {argument: value}, _keep_ignored=True) return CompoundBoundingBox(bounding_boxes, self._model,
self.selector_args.add_ignore(self._model, argument)) def fix_inputs(self, model, fixed_inputs: dict): """ Fix the bounding_box for a `fix_inputs` compound model. Parameters ---------- model : `~astropy.modeling.Model` The new model for which this will be a bounding_box fixed_inputs : dict Dictionary of inputs which have been fixed by this bounding box. """ fixed_input_keys = list(fixed_inputs.keys()) argument = fixed_input_keys.pop() value = fixed_inputs[argument] if self.selector_args.is_argument(self._model, argument): bbox = self._fix_input_selector_arg(argument, value) else: bbox = self._fix_input_bbox_arg(argument, value) if len(fixed_input_keys) > 0: new_fixed_inputs = fixed_inputs.copy() del new_fixed_inputs[argument] bbox = bbox.fix_inputs(model, new_fixed_inputs) if isinstance(bbox, CompoundBoundingBox): selector_args = bbox.named_selector_tuple bbox_dict = bbox elif isinstance(bbox, ModelBoundingBox): selector_args = None bbox_dict = bbox.named_intervals return bbox.__class__.validate(model, bbox_dict, order=bbox.order, selector_args=selector_args)
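# ---------------------------------------------------------------------------
# Illustrative usage (a sketch, not part of the public API contract): how a
# compound bounding box stores per-selector limits.  The model and the
# selector values below are assumptions chosen only for demonstration.
if __name__ == '__main__':
    from astropy.modeling import models

    _demo_model = models.Gaussian2D()
    _demo_cbbox = CompoundBoundingBox.validate(
        _demo_model, {(1,): (-1, 1), (2,): (-2, 2)},
        selector_args=[('x', True)])
    print((1,) in _demo_cbbox)   # True -- a box is registered for x == 1
    print(_demo_cbbox[(2,)])     # the ModelBoundingBox registered for x == 2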
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Mathematical models.""" # pylint: disable=line-too-long, too-many-lines, too-many-arguments, invalid-name import numpy as np from astropy import units as u from astropy.units import Quantity, UnitsError from .core import Fittable1DModel, Fittable2DModel from .parameters import InputParameterError, Parameter from .utils import ellipse_extent __all__ = ['AiryDisk2D', 'Moffat1D', 'Moffat2D', 'Box1D', 'Box2D', 'Const1D', 'Const2D', 'Ellipse2D', 'Disk2D', 'Gaussian1D', 'Gaussian2D', 'Linear1D', 'Lorentz1D', 'RickerWavelet1D', 'RickerWavelet2D', 'RedshiftScaleFactor', 'Multiply', 'Planar2D', 'Scale', 'Sersic1D', 'Sersic2D', 'Shift', 'Sine1D', 'Cosine1D', 'Tangent1D', 'ArcSine1D', 'ArcCosine1D', 'ArcTangent1D', 'Trapezoid1D', 'TrapezoidDisk2D', 'Ring2D', 'Voigt1D', 'KingProjectedAnalytic1D', 'Exponential1D', 'Logarithmic1D'] TWOPI = 2 * np.pi FLOAT_EPSILON = float(np.finfo(np.float32).tiny) # Note that we define this here rather than using the value defined in # astropy.stats to avoid importing astropy.stats every time astropy.modeling # is loaded. GAUSSIAN_SIGMA_TO_FWHM = 2.0 * np.sqrt(2.0 * np.log(2.0)) class Gaussian1D(Fittable1DModel): """ One dimensional Gaussian model. Parameters ---------- amplitude : float or `~astropy.units.Quantity`. Amplitude (peak value) of the Gaussian - for a normalized profile (integrating to 1), set amplitude = 1 / (stddev * np.sqrt(2 * np.pi)) mean : float or `~astropy.units.Quantity`. Mean of the Gaussian. stddev : float or `~astropy.units.Quantity`. Standard deviation of the Gaussian with FWHM = 2 * stddev * np.sqrt(2 * np.log(2)). Notes ----- Either all or none of input ``x``, ``mean`` and ``stddev`` must be provided consistently with compatible units or as unitless numbers. Model formula: .. math:: f(x) = A e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}} Examples -------- >>> from astropy.modeling import models >>> def tie_center(model): ... mean = 50 * model.stddev ... return mean >>> tied_parameters = {'mean': tie_center} Specify that 'mean' is a tied parameter in one of two ways: >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3, ... tied=tied_parameters) or >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3) >>> g1.mean.tied False >>> g1.mean.tied = tie_center >>> g1.mean.tied <function tie_center at 0x...> Fixed parameters: >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3, ... fixed={'stddev': True}) >>> g1.stddev.fixed True or >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3) >>> g1.stddev.fixed False >>> g1.stddev.fixed = True >>> g1.stddev.fixed True .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Gaussian1D plt.figure() s1 = Gaussian1D() r = np.arange(-5, 5, .01) for factor in range(1, 4): s1.amplitude = factor plt.plot(r, s1(r), color=str(0.25 * factor), lw=2) plt.axis([-5, 5, -1, 4]) plt.show() See Also -------- Gaussian2D, Box1D, Moffat1D, Lorentz1D """ amplitude = Parameter(default=1, description="Amplitude (peak value) of the Gaussian") mean = Parameter(default=0, description="Position of peak (Gaussian)") # Ensure stddev makes sense if its bounds are not explicitly set. # stddev must be non-zero and positive. 
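    # FLOAT_EPSILON (the smallest positive normal float32, defined at module
    # level above) serves as the open lower bound, so a fit cannot drive the
    # width to exactly zero.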
stddev = Parameter(default=1, bounds=(FLOAT_EPSILON, None), description="Standard deviation of the Gaussian") def bounding_box(self, factor=5.5): """ Tuple defining the default ``bounding_box`` limits, ``(x_low, x_high)`` Parameters ---------- factor : float The multiple of `stddev` used to define the limits. The default is 5.5, corresponding to a relative error < 1e-7. Examples -------- >>> from astropy.modeling.models import Gaussian1D >>> model = Gaussian1D(mean=0, stddev=2) >>> model.bounding_box ModelBoundingBox( intervals={ x: Interval(lower=-11.0, upper=11.0) } model=Gaussian1D(inputs=('x',)) order='C' ) This range can be set directly (see: `Model.bounding_box <astropy.modeling.Model.bounding_box>`) or by using a different factor, like: >>> model.bounding_box = model.bounding_box(factor=2) >>> model.bounding_box ModelBoundingBox( intervals={ x: Interval(lower=-4.0, upper=4.0) } model=Gaussian1D(inputs=('x',)) order='C' ) """ x0 = self.mean dx = factor * self.stddev return (x0 - dx, x0 + dx) @property def fwhm(self): """Gaussian full width at half maximum.""" return self.stddev * GAUSSIAN_SIGMA_TO_FWHM @staticmethod def evaluate(x, amplitude, mean, stddev): """ Gaussian1D model function. """ return amplitude * np.exp(- 0.5 * (x - mean) ** 2 / stddev ** 2) @staticmethod def fit_deriv(x, amplitude, mean, stddev): """ Gaussian1D model function derivatives. """ d_amplitude = np.exp(-0.5 / stddev ** 2 * (x - mean) ** 2) d_mean = amplitude * d_amplitude * (x - mean) / stddev ** 2 d_stddev = amplitude * d_amplitude * (x - mean) ** 2 / stddev ** 3 return [d_amplitude, d_mean, d_stddev] @property def input_units(self): if self.mean.unit is None: return None return {self.inputs[0]: self.mean.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'mean': inputs_unit[self.inputs[0]], 'stddev': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class Gaussian2D(Fittable2DModel): r""" Two dimensional Gaussian model. Parameters ---------- amplitude : float or `~astropy.units.Quantity`. Amplitude (peak value) of the Gaussian. x_mean : float or `~astropy.units.Quantity`. Mean of the Gaussian in x. y_mean : float or `~astropy.units.Quantity`. Mean of the Gaussian in y. x_stddev : float or `~astropy.units.Quantity` or None. Standard deviation of the Gaussian in x before rotating by theta. Must be None if a covariance matrix (``cov_matrix``) is provided. If no ``cov_matrix`` is given, ``None`` means the default value (1). y_stddev : float or `~astropy.units.Quantity` or None. Standard deviation of the Gaussian in y before rotating by theta. Must be None if a covariance matrix (``cov_matrix``) is provided. If no ``cov_matrix`` is given, ``None`` means the default value (1). theta : float or `~astropy.units.Quantity`, optional. The rotation angle as an angular quantity (`~astropy.units.Quantity` or `~astropy.coordinates.Angle`) or a value in radians (as a float). The rotation angle increases counterclockwise. Must be `None` if a covariance matrix (``cov_matrix``) is provided. If no ``cov_matrix`` is given, `None` means the default value (0). cov_matrix : ndarray, optional A 2x2 covariance matrix. If specified, overrides the ``x_stddev``, ``y_stddev``, and ``theta`` defaults. Notes ----- Either all or none of input ``x, y``, ``[x,y]_mean`` and ``[x,y]_stddev`` must be provided consistently with compatible units or as unitless numbers. Model formula: .. 
math:: f(x, y) = A e^{-a\left(x - x_{0}\right)^{2} -b\left(x - x_{0}\right) \left(y - y_{0}\right) -c\left(y - y_{0}\right)^{2}} Using the following definitions: .. math:: a = \left(\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} + \frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right) b = \left(\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{x}^{2}} - \frac{\sin{\left (2 \theta \right )}}{2 \sigma_{y}^{2}}\right) c = \left(\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} + \frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right) If using a ``cov_matrix``, the model is of the form: .. math:: f(x, y) = A e^{-0.5 \left( \vec{x} - \vec{x}_{0}\right)^{T} \Sigma^{-1} \left(\vec{x} - \vec{x}_{0} \right)} where :math:`\vec{x} = [x, y]`, :math:`\vec{x}_{0} = [x_{0}, y_{0}]`, and :math:`\Sigma` is the covariance matrix: .. math:: \Sigma = \left(\begin{array}{ccc} \sigma_x^2 & \rho \sigma_x \sigma_y \\ \rho \sigma_x \sigma_y & \sigma_y^2 \end{array}\right) :math:`\rho` is the correlation between ``x`` and ``y``, which should be between -1 and +1. Positive correlation corresponds to a ``theta`` in the range 0 to 90 degrees. Negative correlation corresponds to a ``theta`` in the range of 0 to -90 degrees. See [1]_ for more details about the 2D Gaussian function. See Also -------- Gaussian1D, Box2D, Moffat2D References ---------- .. [1] https://en.wikipedia.org/wiki/Gaussian_function """ amplitude = Parameter(default=1, description="Amplitude of the Gaussian") x_mean = Parameter(default=0, description="Peak position (along x axis) of Gaussian") y_mean = Parameter(default=0, description="Peak position (along y axis) of Gaussian") x_stddev = Parameter(default=1, description="Standard deviation of the Gaussian (along x axis)") y_stddev = Parameter(default=1, description="Standard deviation of the Gaussian (along y axis)") theta = Parameter(default=0.0, description=("Rotation angle either as a " "float (in radians) or a " "|Quantity| angle (optional)")) def __init__(self, amplitude=amplitude.default, x_mean=x_mean.default, y_mean=y_mean.default, x_stddev=None, y_stddev=None, theta=None, cov_matrix=None, **kwargs): if cov_matrix is None: if x_stddev is None: x_stddev = self.__class__.x_stddev.default if y_stddev is None: y_stddev = self.__class__.y_stddev.default if theta is None: theta = self.__class__.theta.default else: if x_stddev is not None or y_stddev is not None or theta is not None: raise InputParameterError("Cannot specify both cov_matrix and " "x/y_stddev/theta") # Compute principle coordinate system transformation cov_matrix = np.array(cov_matrix) if cov_matrix.shape != (2, 2): raise ValueError("Covariance matrix must be 2x2") eig_vals, eig_vecs = np.linalg.eig(cov_matrix) x_stddev, y_stddev = np.sqrt(eig_vals) y_vec = eig_vecs[:, 0] theta = np.arctan2(y_vec[1], y_vec[0]) # Ensure stddev makes sense if its bounds are not explicitly set. # stddev must be non-zero and positive. # TODO: Investigate why setting this in Parameter above causes # convolution tests to hang. 
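        # The setdefault calls below supply the positivity bounds only when
        # the user has not already provided bounds of their own, so explicit
        # user-specified bounds always take precedence.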
kwargs.setdefault('bounds', {}) kwargs['bounds'].setdefault('x_stddev', (FLOAT_EPSILON, None)) kwargs['bounds'].setdefault('y_stddev', (FLOAT_EPSILON, None)) super().__init__( amplitude=amplitude, x_mean=x_mean, y_mean=y_mean, x_stddev=x_stddev, y_stddev=y_stddev, theta=theta, **kwargs) @property def x_fwhm(self): """Gaussian full width at half maximum in X.""" return self.x_stddev * GAUSSIAN_SIGMA_TO_FWHM @property def y_fwhm(self): """Gaussian full width at half maximum in Y.""" return self.y_stddev * GAUSSIAN_SIGMA_TO_FWHM def bounding_box(self, factor=5.5): """ Tuple defining the default ``bounding_box`` limits in each dimension, ``((y_low, y_high), (x_low, x_high))`` The default offset from the mean is 5.5-sigma, corresponding to a relative error < 1e-7. The limits are adjusted for rotation. Parameters ---------- factor : float, optional The multiple of `x_stddev` and `y_stddev` used to define the limits. The default is 5.5. Examples -------- >>> from astropy.modeling.models import Gaussian2D >>> model = Gaussian2D(x_mean=0, y_mean=0, x_stddev=1, y_stddev=2) >>> model.bounding_box ModelBoundingBox( intervals={ x: Interval(lower=-5.5, upper=5.5) y: Interval(lower=-11.0, upper=11.0) } model=Gaussian2D(inputs=('x', 'y')) order='C' ) This range can be set directly (see: `Model.bounding_box <astropy.modeling.Model.bounding_box>`) or by using a different factor like: >>> model.bounding_box = model.bounding_box(factor=2) >>> model.bounding_box ModelBoundingBox( intervals={ x: Interval(lower=-2.0, upper=2.0) y: Interval(lower=-4.0, upper=4.0) } model=Gaussian2D(inputs=('x', 'y')) order='C' ) """ a = factor * self.x_stddev b = factor * self.y_stddev dx, dy = ellipse_extent(a, b, self.theta) return ((self.y_mean - dy, self.y_mean + dy), (self.x_mean - dx, self.x_mean + dx)) @staticmethod def evaluate(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta): """Two dimensional Gaussian function""" cost2 = np.cos(theta) ** 2 sint2 = np.sin(theta) ** 2 sin2t = np.sin(2. * theta) xstd2 = x_stddev ** 2 ystd2 = y_stddev ** 2 xdiff = x - x_mean ydiff = y - y_mean a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2)) b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2)) c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2)) return amplitude * np.exp(-((a * xdiff ** 2) + (b * xdiff * ydiff) + (c * ydiff ** 2))) @staticmethod def fit_deriv(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta): """Two dimensional Gaussian function derivative with respect to parameters""" cost = np.cos(theta) sint = np.sin(theta) cost2 = np.cos(theta) ** 2 sint2 = np.sin(theta) ** 2 cos2t = np.cos(2. * theta) sin2t = np.sin(2. * theta) xstd2 = x_stddev ** 2 ystd2 = y_stddev ** 2 xstd3 = x_stddev ** 3 ystd3 = y_stddev ** 3 xdiff = x - x_mean ydiff = y - y_mean xdiff2 = xdiff ** 2 ydiff2 = ydiff ** 2 a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2)) b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2)) c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2)) g = amplitude * np.exp(-((a * xdiff2) + (b * xdiff * ydiff) + (c * ydiff2))) da_dtheta = (sint * cost * ((1. / ystd2) - (1. / xstd2))) da_dx_stddev = -cost2 / xstd3 da_dy_stddev = -sint2 / ystd3 db_dtheta = (cos2t / xstd2) - (cos2t / ystd2) db_dx_stddev = -sin2t / xstd3 db_dy_stddev = sin2t / ystd3 dc_dtheta = -da_dtheta dc_dx_stddev = -sint2 / xstd3 dc_dy_stddev = -cost2 / ystd3 dg_dA = g / amplitude dg_dx_mean = g * ((2. * a * xdiff) + (b * ydiff)) dg_dy_mean = g * ((b * xdiff) + (2. 
* c * ydiff)) dg_dx_stddev = g * (-(da_dx_stddev * xdiff2 + db_dx_stddev * xdiff * ydiff + dc_dx_stddev * ydiff2)) dg_dy_stddev = g * (-(da_dy_stddev * xdiff2 + db_dy_stddev * xdiff * ydiff + dc_dy_stddev * ydiff2)) dg_dtheta = g * (-(da_dtheta * xdiff2 + db_dtheta * xdiff * ydiff + dc_dtheta * ydiff2)) return [dg_dA, dg_dx_mean, dg_dy_mean, dg_dx_stddev, dg_dy_stddev, dg_dtheta] @property def input_units(self): if self.x_mean.unit is None and self.y_mean.unit is None: return None return {self.inputs[0]: self.x_mean.unit, self.inputs[1]: self.y_mean.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]: raise UnitsError("Units of 'x' and 'y' inputs should match") return {'x_mean': inputs_unit[self.inputs[0]], 'y_mean': inputs_unit[self.inputs[0]], 'x_stddev': inputs_unit[self.inputs[0]], 'y_stddev': inputs_unit[self.inputs[0]], 'theta': u.rad, 'amplitude': outputs_unit[self.outputs[0]]} class Shift(Fittable1DModel): """ Shift a coordinate. Parameters ---------- offset : float Offset to add to a coordinate. """ offset = Parameter(default=0, description="Offset to add to a model") linear = True _has_inverse_bounding_box = True @property def input_units(self): if self.offset.unit is None: return None return {self.inputs[0]: self.offset.unit} @property def inverse(self): """One dimensional inverse Shift model function""" inv = self.copy() inv.offset *= -1 try: self.bounding_box except NotImplementedError: pass else: inv.bounding_box = tuple(self.evaluate(x, self.offset) for x in self.bounding_box) return inv @staticmethod def evaluate(x, offset): """One dimensional Shift model function""" return x + offset @staticmethod def sum_of_implicit_terms(x): """Evaluate the implicit term (x) of one dimensional Shift model""" return x @staticmethod def fit_deriv(x, *params): """One dimensional Shift model derivative with respect to parameter""" d_offset = np.ones_like(x) return [d_offset] def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'offset': outputs_unit[self.outputs[0]]} class Scale(Fittable1DModel): """ Multiply a model by a dimensionless factor. Parameters ---------- factor : float Factor by which to scale a coordinate. Notes ----- If ``factor`` is a `~astropy.units.Quantity` then the units will be stripped before the scaling operation. 
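Examples
--------
An illustrative sketch:

>>> from astropy.modeling.models import Scale
>>> s = Scale(factor=2.0)
>>> s(3.0)
6.0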
""" factor = Parameter(default=1, description="Factor by which to scale a model") linear = True fittable = True _input_units_strict = True _input_units_allow_dimensionless = True _has_inverse_bounding_box = True @property def input_units(self): if self.factor.unit is None: return None return {self.inputs[0]: self.factor.unit} @property def inverse(self): """One dimensional inverse Scale model function""" inv = self.copy() inv.factor = 1 / self.factor try: self.bounding_box except NotImplementedError: pass else: inv.bounding_box = tuple(self.evaluate(x, self.factor) for x in self.bounding_box.bounding_box()) return inv @staticmethod def evaluate(x, factor): """One dimensional Scale model function""" if isinstance(factor, u.Quantity): factor = factor.value return factor * x @staticmethod def fit_deriv(x, *params): """One dimensional Scale model derivative with respect to parameter""" d_factor = x return [d_factor] def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'factor': outputs_unit[self.outputs[0]]} class Multiply(Fittable1DModel): """ Multiply a model by a quantity or number. Parameters ---------- factor : float Factor by which to multiply a coordinate. """ factor = Parameter(default=1, description="Factor by which to multiply a model") linear = True fittable = True _has_inverse_bounding_box = True @property def inverse(self): """One dimensional inverse multiply model function""" inv = self.copy() inv.factor = 1 / self.factor try: self.bounding_box except NotImplementedError: pass else: inv.bounding_box = tuple(self.evaluate(x, self.factor) for x in self.bounding_box.bounding_box()) return inv @staticmethod def evaluate(x, factor): """One dimensional multiply model function""" return factor * x @staticmethod def fit_deriv(x, *params): """One dimensional multiply model derivative with respect to parameter""" d_factor = x return [d_factor] def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'factor': outputs_unit[self.outputs[0]]} class RedshiftScaleFactor(Fittable1DModel): """ One dimensional redshift scale factor model. Parameters ---------- z : float Redshift value. Notes ----- Model formula: .. math:: f(x) = x (1 + z) """ z = Parameter(description='Redshift', default=0) _has_inverse_bounding_box = True @staticmethod def evaluate(x, z): """One dimensional RedshiftScaleFactor model function""" return (1 + z) * x @staticmethod def fit_deriv(x, z): """One dimensional RedshiftScaleFactor model derivative""" d_z = x return [d_z] @property def inverse(self): """Inverse RedshiftScaleFactor model""" inv = self.copy() inv.z = 1.0 / (1.0 + self.z) - 1.0 try: self.bounding_box except NotImplementedError: pass else: inv.bounding_box = tuple(self.evaluate(x, self.z) for x in self.bounding_box.bounding_box()) return inv class Sersic1D(Fittable1DModel): r""" One dimensional Sersic surface brightness profile. Parameters ---------- amplitude : float Surface brightness at r_eff. r_eff : float Effective (half-light) radius n : float Sersic Index. See Also -------- Gaussian1D, Moffat1D, Lorentz1D Notes ----- Model formula: .. math:: I(r)=I_e\exp\left\{-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]\right\} The constant :math:`b_n` is defined such that :math:`r_e` contains half the total luminosity, and can be solved for numerically. .. math:: \Gamma(2n) = 2\gamma (b_n,2n) Examples -------- .. 
plot:: :include-source: import numpy as np from astropy.modeling.models import Sersic1D import matplotlib.pyplot as plt plt.figure() plt.subplot(111, xscale='log', yscale='log') s1 = Sersic1D(amplitude=1, r_eff=5) r=np.arange(0, 100, .01) for n in range(1, 10): s1.n = n plt.plot(r, s1(r), color=str(float(n) / 15)) plt.axis([1e-1, 30, 1e-2, 1e3]) plt.xlabel('log Radius') plt.ylabel('log Surface Brightness') plt.text(.25, 1.5, 'n=1') plt.text(.25, 300, 'n=10') plt.xticks([]) plt.yticks([]) plt.show() References ---------- .. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html """ amplitude = Parameter(default=1, description="Surface brightness at r_eff") r_eff = Parameter(default=1, description="Effective (half-light) radius") n = Parameter(default=4, description="Sersic Index") _gammaincinv = None @classmethod def evaluate(cls, r, amplitude, r_eff, n): """One dimensional Sersic profile function.""" if cls._gammaincinv is None: from scipy.special import gammaincinv cls._gammaincinv = gammaincinv return (amplitude * np.exp( -cls._gammaincinv(2 * n, 0.5) * ((r / r_eff) ** (1 / n) - 1))) @property def input_units(self): if self.r_eff.unit is None: return None return {self.inputs[0]: self.r_eff.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'r_eff': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class _Trigonometric1D(Fittable1DModel): """ Base class for one dimensional trigonometric and inverse trigonometric models Parameters ---------- amplitude : float Oscillation amplitude frequency : float Oscillation frequency phase : float Oscillation phase """ amplitude = Parameter(default=1, description="Oscillation amplitude") frequency = Parameter(default=1, description="Oscillation frequency") phase = Parameter(default=0, description="Oscillation phase") @property def input_units(self): if self.frequency.unit is None: return None return {self.inputs[0]: 1. / self.frequency.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'frequency': inputs_unit[self.inputs[0]] ** -1, 'amplitude': outputs_unit[self.outputs[0]]} class Sine1D(_Trigonometric1D): """ One dimensional Sine model. Parameters ---------- amplitude : float Oscillation amplitude frequency : float Oscillation frequency phase : float Oscillation phase See Also -------- ArcSine1D, Cosine1D, Tangent1D, Const1D, Linear1D Notes ----- Model formula: .. math:: f(x) = A \\sin(2 \\pi f x + 2 \\pi p) Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Sine1D plt.figure() s1 = Sine1D(amplitude=1, frequency=.25) r=np.arange(0, 10, .01) for amplitude in range(1,4): s1.amplitude = amplitude plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2) plt.axis([0, 10, -5, 5]) plt.show() """ @staticmethod def evaluate(x, amplitude, frequency, phase): """One dimensional Sine model function""" # Note: If frequency and x are quantities, they should normally have # inverse units, so that argument ends up being dimensionless. However, # np.sin of a dimensionless quantity will crash, so we remove the # quantity-ness from argument in this case (another option would be to # multiply by * u.rad but this would be slower overall). 
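# (The same unit-stripping pattern is reused by the other trigonometric and
# inverse trigonometric models defined later in this module.)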
argument = TWOPI * (frequency * x + phase) if isinstance(argument, Quantity): argument = argument.value return amplitude * np.sin(argument) @staticmethod def fit_deriv(x, amplitude, frequency, phase): """One dimensional Sine model derivative""" d_amplitude = np.sin(TWOPI * frequency * x + TWOPI * phase) d_frequency = (TWOPI * x * amplitude * np.cos(TWOPI * frequency * x + TWOPI * phase)) d_phase = (TWOPI * amplitude * np.cos(TWOPI * frequency * x + TWOPI * phase)) return [d_amplitude, d_frequency, d_phase] @property def inverse(self): """One dimensional inverse of Sine""" return ArcSine1D(amplitude=self.amplitude, frequency=self.frequency, phase=self.phase) class Cosine1D(_Trigonometric1D): """ One dimensional Cosine model. Parameters ---------- amplitude : float Oscillation amplitude frequency : float Oscillation frequency phase : float Oscillation phase See Also -------- ArcCosine1D, Sine1D, Tangent1D, Const1D, Linear1D Notes ----- Model formula: .. math:: f(x) = A \\cos(2 \\pi f x + 2 \\pi p) Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Cosine1D plt.figure() s1 = Cosine1D(amplitude=1, frequency=.25) r=np.arange(0, 10, .01) for amplitude in range(1,4): s1.amplitude = amplitude plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2) plt.axis([0, 10, -5, 5]) plt.show() """ @staticmethod def evaluate(x, amplitude, frequency, phase): """One dimensional Cosine model function""" # Note: If frequency and x are quantities, they should normally have # inverse units, so that argument ends up being dimensionless. However, # np.sin of a dimensionless quantity will crash, so we remove the # quantity-ness from argument in this case (another option would be to # multiply by * u.rad but this would be slower overall). argument = TWOPI * (frequency * x + phase) if isinstance(argument, Quantity): argument = argument.value return amplitude * np.cos(argument) @staticmethod def fit_deriv(x, amplitude, frequency, phase): """One dimensional Cosine model derivative""" d_amplitude = np.cos(TWOPI * frequency * x + TWOPI * phase) d_frequency = - (TWOPI * x * amplitude * np.sin(TWOPI * frequency * x + TWOPI * phase)) d_phase = - (TWOPI * amplitude * np.sin(TWOPI * frequency * x + TWOPI * phase)) return [d_amplitude, d_frequency, d_phase] @property def inverse(self): """One dimensional inverse of Cosine""" return ArcCosine1D(amplitude=self.amplitude, frequency=self.frequency, phase=self.phase) class Tangent1D(_Trigonometric1D): """ One dimensional Tangent model. Parameters ---------- amplitude : float Oscillation amplitude frequency : float Oscillation frequency phase : float Oscillation phase See Also -------- Sine1D, Cosine1D, Const1D, Linear1D Notes ----- Model formula: .. math:: f(x) = A \\tan(2 \\pi f x + 2 \\pi p) Note that the tangent function is undefined for inputs of the form pi/2 + n*pi for all integers n. Thus the default bounding box has been restricted to: .. math:: [(-1/4 - p)/f, (1/4 - p)/f] which is the smallest interval on which the tangent function is continuous. Examples -------- ..
plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Tangent1D plt.figure() s1 = Tangent1D(amplitude=1, frequency=.25) r=np.arange(0, 10, .01) for amplitude in range(1,4): s1.amplitude = amplitude plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2) plt.axis([0, 10, -5, 5]) plt.show() """ @staticmethod def evaluate(x, amplitude, frequency, phase): """One dimensional Tangent model function""" # Note: If frequency and x are quantities, they should normally have # inverse units, so that argument ends up being dimensionless. However, # np.sin of a dimensionless quantity will crash, so we remove the # quantity-ness from argument in this case (another option would be to # multiply by * u.rad but this would be slower overall). argument = TWOPI * (frequency * x + phase) if isinstance(argument, Quantity): argument = argument.value return amplitude * np.tan(argument) @staticmethod def fit_deriv(x, amplitude, frequency, phase): """One dimensional Tangent model derivative""" sec = 1 / (np.cos(TWOPI * frequency * x + TWOPI * phase))**2 d_amplitude = np.tan(TWOPI * frequency * x + TWOPI * phase) d_frequency = TWOPI * x * amplitude * sec d_phase = TWOPI * amplitude * sec return [d_amplitude, d_frequency, d_phase] @property def inverse(self): """One dimensional inverse of Tangent""" return ArcTangent1D(amplitude=self.amplitude, frequency=self.frequency, phase=self.phase) def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits, ``(x_low, x_high)`` """ bbox = [(-1/4 - self.phase) / self.frequency, (1/4 - self.phase) / self.frequency] if self.frequency.unit is not None: bbox = bbox / self.frequency.unit return bbox class _InverseTrigonometric1D(_Trigonometric1D): """ Base class for one dimensional inverse trigonometric models """ @property def input_units(self): if self.amplitude.unit is None: return None return {self.inputs[0]: self.amplitude.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'frequency': outputs_unit[self.outputs[0]] ** -1, 'amplitude': inputs_unit[self.inputs[0]]} class ArcSine1D(_InverseTrigonometric1D): """ One dimensional ArcSine model returning values between -pi/2 and pi/2 only. Parameters ---------- amplitude : float Oscillation amplitude for corresponding Sine frequency : float Oscillation frequency for corresponding Sine phase : float Oscillation phase for corresponding Sine See Also -------- Sine1D, ArcCosine1D, ArcTangent1D Notes ----- Model formula: .. math:: f(x) = ((arcsin(x / A) / 2pi) - p) / f The arcsin function being used for this model will only accept inputs in [-A, A]; otherwise, a runtime warning will be thrown and the result will be NaN. To avoid this, the bounding_box has been properly set to accommodate this; therefore, it is recommended that this model always be evaluated with the ``with_bounding_box=True`` option. Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import ArcSine1D plt.figure() s1 = ArcSine1D(amplitude=1, frequency=.25) r=np.arange(-1, 1, .01) for amplitude in range(1,4): s1.amplitude = amplitude plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2) plt.axis([-1, 1, -np.pi/2, np.pi/2]) plt.show() """ @staticmethod def evaluate(x, amplitude, frequency, phase): """One dimensional ArcSine model function""" # Note: If frequency and x are quantities, they should normally have # inverse units, so that argument ends up being dimensionless. 
However, # np.sin of a dimensionless quantity will crash, so we remove the # quantity-ness from argument in this case (another option would be to # multiply by * u.rad but this would be slower overall). argument = x / amplitude if isinstance(argument, Quantity): argument = argument.value arc_sine = np.arcsin(argument) / TWOPI return (arc_sine - phase) / frequency @staticmethod def fit_deriv(x, amplitude, frequency, phase): """One dimensional ArcSine model derivative""" d_amplitude = - x / (TWOPI * frequency * amplitude**2 * np.sqrt(1 - (x / amplitude)**2)) d_frequency = (phase - (np.arcsin(x / amplitude) / TWOPI)) / frequency**2 d_phase = - 1 / frequency * np.ones(x.shape) return [d_amplitude, d_frequency, d_phase] def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits, ``(x_low, x_high)`` """ return -1 * self.amplitude, 1 * self.amplitude @property def inverse(self): """One dimensional inverse of ArcSine""" return Sine1D(amplitude=self.amplitude, frequency=self.frequency, phase=self.phase) class ArcCosine1D(_InverseTrigonometric1D): """ One dimensional ArcCosine returning values between 0 and pi only. Parameters ---------- amplitude : float Oscillation amplitude for corresponding Cosine frequency : float Oscillation frequency for corresponding Cosine phase : float Oscillation phase for corresponding Cosine See Also -------- Cosine1D, ArcSine1D, ArcTangent1D Notes ----- Model formula: .. math:: f(x) = ((arccos(x / A) / 2pi) - p) / f The arccos function being used for this model will only accept inputs in [-A, A]; otherwise, a runtime warning will be thrown and the result will be NaN. To avoid this, the bounding_box has been properly set to accommodate this; therefore, it is recommended that this model always be evaluated with the ``with_bounding_box=True`` option. Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import ArcCosine1D plt.figure() s1 = ArcCosine1D(amplitude=1, frequency=.25) r=np.arange(-1, 1, .01) for amplitude in range(1,4): s1.amplitude = amplitude plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2) plt.axis([-1, 1, 0, np.pi]) plt.show() """ @staticmethod def evaluate(x, amplitude, frequency, phase): """One dimensional ArcCosine model function""" # Note: If frequency and x are quantities, they should normally have # inverse units, so that argument ends up being dimensionless. However, # np.sin of a dimensionless quantity will crash, so we remove the # quantity-ness from argument in this case (another option would be to # multiply by * u.rad but this would be slower overall). 
argument = x / amplitude if isinstance(argument, Quantity): argument = argument.value arc_cos = np.arccos(argument) / TWOPI return (arc_cos - phase) / frequency @staticmethod def fit_deriv(x, amplitude, frequency, phase): """One dimensional ArcCosine model derivative""" d_amplitude = x / (TWOPI * frequency * amplitude**2 * np.sqrt(1 - (x / amplitude)**2)) d_frequency = (phase - (np.arccos(x / amplitude) / TWOPI)) / frequency**2 d_phase = - 1 / frequency * np.ones(x.shape) return [d_amplitude, d_frequency, d_phase] def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits, ``(x_low, x_high)`` """ return -1 * self.amplitude, 1 * self.amplitude @property def inverse(self): """One dimensional inverse of ArcCosine""" return Cosine1D(amplitude=self.amplitude, frequency=self.frequency, phase=self.phase) class ArcTangent1D(_InverseTrigonometric1D): """ One dimensional ArcTangent model returning values between -pi/2 and pi/2 only. Parameters ---------- amplitude : float Oscillation amplitude for corresponding Tangent frequency : float Oscillation frequency for corresponding Tangent phase : float Oscillation phase for corresponding Tangent See Also -------- Tangent1D, ArcSine1D, ArcCosine1D Notes ----- Model formula: .. math:: f(x) = ((arctan(x / A) / 2pi) - p) / f Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import ArcTangent1D plt.figure() s1 = ArcTangent1D(amplitude=1, frequency=.25) r=np.arange(-10, 10, .01) for amplitude in range(1,4): s1.amplitude = amplitude plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2) plt.axis([-10, 10, -np.pi/2, np.pi/2]) plt.show() """ @staticmethod def evaluate(x, amplitude, frequency, phase): """One dimensional ArcTangent model function""" # Note: If frequency and x are quantities, they should normally have # inverse units, so that argument ends up being dimensionless. However, # np.sin of a dimensionless quantity will crash, so we remove the # quantity-ness from argument in this case (another option would be to # multiply by * u.rad but this would be slower overall). argument = x / amplitude if isinstance(argument, Quantity): argument = argument.value arc_tan = np.arctan(argument) / TWOPI return (arc_tan - phase) / frequency @staticmethod def fit_deriv(x, amplitude, frequency, phase): """One dimensional ArcTangent model derivative""" d_amplitude = - x / (TWOPI * frequency * amplitude**2 * (1 + (x / amplitude)**2)) d_frequency = (phase - (np.arctan(x / amplitude) / TWOPI)) / frequency**2 d_phase = - 1 / frequency * np.ones(x.shape) return [d_amplitude, d_frequency, d_phase] @property def inverse(self): """One dimensional inverse of ArcTangent""" return Tangent1D(amplitude=self.amplitude, frequency=self.frequency, phase=self.phase) class Linear1D(Fittable1DModel): """ One dimensional Line model. Parameters ---------- slope : float Slope of the straight line intercept : float Intercept of the straight line See Also -------- Const1D Notes ----- Model formula: ..
math:: f(x) = a x + b """ slope = Parameter(default=1, description="Slope of the straight line") intercept = Parameter(default=0, description="Intercept of the straight line") linear = True @staticmethod def evaluate(x, slope, intercept): """One dimensional Line model function""" return slope * x + intercept @staticmethod def fit_deriv(x, *params): """One dimensional Line model derivative with respect to parameters""" d_slope = x d_intercept = np.ones_like(x) return [d_slope, d_intercept] @property def inverse(self): new_slope = self.slope ** -1 new_intercept = -self.intercept / self.slope return self.__class__(slope=new_slope, intercept=new_intercept) @property def input_units(self): if self.intercept.unit is None and self.slope.unit is None: return None return {self.inputs[0]: self.intercept.unit / self.slope.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'intercept': outputs_unit[self.outputs[0]], 'slope': outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]]} class Planar2D(Fittable2DModel): """ Two dimensional Plane model. Parameters ---------- slope_x : float Slope of the plane in X slope_y : float Slope of the plane in Y intercept : float Z-intercept of the plane Notes ----- Model formula: .. math:: f(x, y) = a x + b y + c """ slope_x = Parameter(default=1, description="Slope of the plane in X") slope_y = Parameter(default=1, description="Slope of the plane in Y") intercept = Parameter(default=0, description="Z-intercept of the plane") linear = True @staticmethod def evaluate(x, y, slope_x, slope_y, intercept): """Two dimensional Plane model function""" return slope_x * x + slope_y * y + intercept @staticmethod def fit_deriv(x, y, *params): """Two dimensional Plane model derivative with respect to parameters""" d_slope_x = x d_slope_y = y d_intercept = np.ones_like(x) return [d_slope_x, d_slope_y, d_intercept] def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'intercept': outputs_unit['z'], 'slope_x': outputs_unit['z'] / inputs_unit['x'], 'slope_y': outputs_unit['z'] / inputs_unit['y']} class Lorentz1D(Fittable1DModel): """ One dimensional Lorentzian model. Parameters ---------- amplitude : float or `~astropy.units.Quantity`. Peak value - for a normalized profile (integrating to 1), set amplitude = 2 / (np.pi * fwhm) x_0 : float or `~astropy.units.Quantity`. Position of the peak fwhm : float or `~astropy.units.Quantity`. Full width at half maximum (FWHM) See Also -------- Gaussian1D, Box1D, RickerWavelet1D Notes ----- Either all or none of input ``x``, position ``x_0`` and ``fwhm`` must be provided consistently with compatible units or as unitless numbers. Model formula: .. math:: f(x) = \\frac{A \\gamma^{2}}{\\gamma^{2} + \\left(x - x_{0}\\right)^{2}} where :math:`\\gamma` is half of given FWHM. Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Lorentz1D plt.figure() s1 = Lorentz1D() r = np.arange(-5, 5, .01) for factor in range(1, 4): s1.amplitude = factor plt.plot(r, s1(r), color=str(0.25 * factor), lw=2) plt.axis([-5, 5, -1, 4]) plt.show() """ amplitude = Parameter(default=1, description="Peak value") x_0 = Parameter(default=0, description="Position of the peak") fwhm = Parameter(default=1, description="Full width at half maximum") @staticmethod def evaluate(x, amplitude, x_0, fwhm): """One dimensional Lorentzian model function""" return (amplitude * ((fwhm / 2.) ** 2) / ((x - x_0) ** 2 + (fwhm / 2.) 
** 2)) @staticmethod def fit_deriv(x, amplitude, x_0, fwhm): """One dimensional Lorentzian model derivative with respect to parameters""" d_amplitude = fwhm ** 2 / (fwhm ** 2 + (x - x_0) ** 2) d_x_0 = (amplitude * d_amplitude * (2 * x - 2 * x_0) / (fwhm ** 2 + (x - x_0) ** 2)) d_fwhm = 2 * amplitude * d_amplitude / fwhm * (1 - d_amplitude) return [d_amplitude, d_x_0, d_fwhm] def bounding_box(self, factor=25): """Tuple defining the default ``bounding_box`` limits, ``(x_low, x_high)``. Parameters ---------- factor : float The multiple of FWHM used to define the limits. Default is chosen to include most (99%) of the area under the curve, while still showing the central feature of interest. """ x0 = self.x_0 dx = factor * self.fwhm return (x0 - dx, x0 + dx) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'x_0': inputs_unit[self.inputs[0]], 'fwhm': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class Voigt1D(Fittable1DModel): """ One dimensional model for the Voigt profile. Parameters ---------- x_0 : float or `~astropy.units.Quantity` Position of the peak amplitude_L : float or `~astropy.units.Quantity`. The Lorentzian amplitude (peak of the associated Lorentz function) - for a normalized profile (integrating to 1), set amplitude_L = 2 / (np.pi * fwhm_L) fwhm_L : float or `~astropy.units.Quantity` The Lorentzian full width at half maximum fwhm_G : float or `~astropy.units.Quantity`. The Gaussian full width at half maximum method : str, optional Algorithm for computing the complex error function; one of 'Humlicek2' (default, fast and generally more accurate than ``rtol=3.e-5``) or 'Scipy', alternatively 'wofz' (requires ``scipy``, almost as fast and reference in accuracy). See Also -------- Gaussian1D, Lorentz1D Notes ----- Either all or none of input ``x``, position ``x_0`` and the ``fwhm_*`` must be provided consistently with compatible units or as unitless numbers. Voigt function is calculated as real part of the complex error function computed from either Humlicek's rational approximations (JQSRT 21:309, 1979; 27:437, 1982) following Schreier 2018 (MNRAS 479, 3068; and ``hum2zpf16m`` from his cpfX.py module); or `~scipy.special.wofz` (implementing 'Faddeeva.cc'). Examples -------- .. 
plot:: :include-source: import numpy as np from astropy.modeling.models import Voigt1D import matplotlib.pyplot as plt plt.figure() x = np.arange(0, 10, 0.01) v1 = Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9) plt.plot(x, v1(x)) plt.show() """ x_0 = Parameter(default=0, description="Position of the peak") amplitude_L = Parameter(default=1, # noqa: N815 description="The Lorentzian amplitude") fwhm_L = Parameter(default=2/np.pi, # noqa: N815 description="The Lorentzian full width at half maximum") fwhm_G = Parameter(default=np.log(2), # noqa: N815 description="The Gaussian full width at half maximum") sqrt_pi = np.sqrt(np.pi) sqrt_ln2 = np.sqrt(np.log(2)) sqrt_ln2pi = np.sqrt(np.log(2) * np.pi) _last_z = np.zeros(1, dtype=complex) _last_w = np.zeros(1, dtype=float) _faddeeva = None def __init__(self, x_0=x_0.default, amplitude_L=amplitude_L.default, # noqa: N803 fwhm_L=fwhm_L.default, fwhm_G=fwhm_G.default, method='humlicek2', # noqa: N803 **kwargs): if str(method).lower() in ('wofz', 'scipy'): from scipy.special import wofz self._faddeeva = wofz elif str(method).lower() == 'humlicek2': self._faddeeva = self._hum2zpf16c else: raise ValueError(f'Not a valid method for Voigt1D Faddeeva function: {method}.') self.method = self._faddeeva.__name__ super().__init__(x_0=x_0, amplitude_L=amplitude_L, fwhm_L=fwhm_L, fwhm_G=fwhm_G, **kwargs) def _wrap_wofz(self, z): """Call complex error (Faddeeva) function w(z) implemented by algorithm `method`; cache results for consecutive calls from `evaluate`, `fit_deriv`.""" if (z.shape == self._last_z.shape and np.allclose(z, self._last_z, rtol=1.e-14, atol=1.e-15)): return self._last_w self._last_w = self._faddeeva(z) self._last_z = z return self._last_w def evaluate(self, x, x_0, amplitude_L, fwhm_L, fwhm_G): # noqa: N803 """One dimensional Voigt function scaled to Lorentz peak amplitude.""" z = np.atleast_1d(2 * (x - x_0) + 1j * fwhm_L) * self.sqrt_ln2 / fwhm_G # The normalised Voigt profile is w.real * self.sqrt_ln2 / (self.sqrt_pi * fwhm_G) * 2 ; # for the legacy definition we multiply with np.pi * fwhm_L / 2 * amplitude_L return self._wrap_wofz(z).real * self.sqrt_ln2pi / fwhm_G * fwhm_L * amplitude_L def fit_deriv(self, x, x_0, amplitude_L, fwhm_L, fwhm_G): # noqa: N803 """Derivative of the one dimensional Voigt function with respect to parameters.""" s = self.sqrt_ln2 / fwhm_G z = np.atleast_1d(2 * (x - x_0) + 1j * fwhm_L) * s # V * constant from McLean implementation (== their Voigt function) w = self._wrap_wofz(z) * s * fwhm_L * amplitude_L * self.sqrt_pi # Schreier (2018) Eq. 
6 == (dvdx + 1j * dvdy) / (sqrt(pi) * fwhm_L * amplitude_L) dwdz = -2 * z * w + 2j * s * fwhm_L * amplitude_L return [-dwdz.real * 2 * s, w.real / amplitude_L, w.real / fwhm_L - dwdz.imag * s, (-w.real - s * (2 * (x - x_0) * dwdz.real - fwhm_L * dwdz.imag)) / fwhm_G] @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'x_0': inputs_unit[self.inputs[0]], 'fwhm_L': inputs_unit[self.inputs[0]], 'fwhm_G': inputs_unit[self.inputs[0]], 'amplitude_L': outputs_unit[self.outputs[0]]} @staticmethod def _hum2zpf16c(z, s=10.0): """Complex error function w(z) for z = x + iy combining Humlicek's rational approximations: |x| + y > 10: Humlicek (JQSRT, 1982) rational approximation for region II; else: Humlicek (JQSRT, 1979) rational approximation with n=16 and delta=y0=1.35 Version using a mask and np.place; single complex argument version of Franz Schreier's cpfX.hum2zpf16m. Originally licensed under a 3-clause BSD style license - see https://atmos.eoc.dlr.de/tools/lbl4IR/cpfX.py """ # Optimized (single fraction) Humlicek region I rational approximation for n=16, delta=1.35 AA = np.array([+46236.3358828121, -147726.58393079657j, # noqa: N806 -206562.80451354137, 281369.1590631087j, +183092.74968253175, -184787.96830696272j, -66155.39578477248, 57778.05827983565j, +11682.770904216826, -9442.402767960672j, -1052.8438624933142, 814.0996198624186j, +45.94499030751872, -34.59751573708725j, -0.7616559377907136, 0.5641895835476449j]) # 1j/sqrt(pi) to the 12. digit bb = np.array([+7918.06640624997, 0.0, -126689.0625, 0.0, +295607.8125, 0.0, -236486.25, 0.0, +84459.375, 0.0, -15015.0, 0.0, +1365.0, 0.0, -60.0, 0.0, +1.0]) sqrt_piinv = 1.0 / np.sqrt(np.pi) zz = z * z w = 1j * (z * (zz * sqrt_piinv - 1.410474)) / (0.75 + zz*(zz - 3.0)) if np.any(z.imag < s): mask = abs(z.real) + z.imag < s # returns true for interior points # returns small complex array covering only the interior region Z = z[np.where(mask)] + 1.35j ZZ = Z * Z numer = (((((((((((((((AA[15]*Z + AA[14])*Z + AA[13])*Z + AA[12])*Z + AA[11])*Z + AA[10])*Z + AA[9])*Z + AA[8])*Z + AA[7])*Z + AA[6])*Z + AA[5])*Z + AA[4])*Z+AA[3])*Z + AA[2])*Z + AA[1])*Z + AA[0]) denom = (((((((ZZ + bb[14])*ZZ + bb[12])*ZZ + bb[10])*ZZ+bb[8])*ZZ + bb[6])*ZZ + bb[4])*ZZ + bb[2])*ZZ + bb[0] np.place(w, mask, numer / denom) return w class Const1D(Fittable1DModel): """ One dimensional Constant model. Parameters ---------- amplitude : float Value of the constant function See Also -------- Const2D Notes ----- Model formula: .. math:: f(x) = A Examples -------- .. 
plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Const1D plt.figure() s1 = Const1D() r = np.arange(-5, 5, .01) for factor in range(1, 4): s1.amplitude = factor plt.plot(r, s1(r), color=str(0.25 * factor), lw=2) plt.axis([-5, 5, -1, 4]) plt.show() """ amplitude = Parameter(default=1, description="Value of the constant function") linear = True @staticmethod def evaluate(x, amplitude): """One dimensional Constant model function""" if amplitude.size == 1: # This is slightly faster than using ones_like and multiplying x = np.empty_like(x, subok=False) x.fill(amplitude.item()) else: # This case is less likely but could occur if the amplitude # parameter is given an array-like value x = amplitude * np.ones_like(x, subok=False) if isinstance(amplitude, Quantity): return Quantity(x, unit=amplitude.unit, copy=False) return x @staticmethod def fit_deriv(x, amplitude): """One dimensional Constant model derivative with respect to parameters""" d_amplitude = np.ones_like(x) return [d_amplitude] @property def input_units(self): return None def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'amplitude': outputs_unit[self.outputs[0]]} class Const2D(Fittable2DModel): """ Two dimensional Constant model. Parameters ---------- amplitude : float Value of the constant function See Also -------- Const1D Notes ----- Model formula: .. math:: f(x, y) = A """ amplitude = Parameter(default=1, description="Value of the constant function") linear = True @staticmethod def evaluate(x, y, amplitude): """Two dimensional Constant model function""" if amplitude.size == 1: # This is slightly faster than using ones_like and multiplying x = np.empty_like(x, subok=False) x.fill(amplitude.item()) else: # This case is less likely but could occur if the amplitude # parameter is given an array-like value x = amplitude * np.ones_like(x, subok=False) if isinstance(amplitude, Quantity): return Quantity(x, unit=amplitude.unit, copy=False) return x @property def input_units(self): return None def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'amplitude': outputs_unit[self.outputs[0]]} class Ellipse2D(Fittable2DModel): """ A 2D Ellipse model. Parameters ---------- amplitude : float Value of the ellipse. x_0 : float x position of the center of the disk. y_0 : float y position of the center of the disk. a : float The length of the semimajor axis. b : float The length of the semiminor axis. theta : float or `~astropy.units.Quantity`, optional The rotation angle as an angular quantity (`~astropy.units.Quantity` or `~astropy.coordinates.Angle`) or a value in radians (as a float). The rotation angle increases counterclockwise from the positive x axis. See Also -------- Disk2D, Box2D Notes ----- Model formula: .. math:: f(x, y) = \\left \\{ \\begin{array}{ll} \\mathrm{amplitude} & : \\left[\\frac{(x - x_0) \\cos \\theta + (y - y_0) \\sin \\theta}{a}\\right]^2 + \\left[\\frac{-(x - x_0) \\sin \\theta + (y - y_0) \\cos \\theta}{b}\\right]^2 \\leq 1 \\\\ 0 & : \\mathrm{otherwise} \\end{array} \\right. Examples -------- .. 
plot:: :include-source: import numpy as np from astropy.modeling.models import Ellipse2D from astropy.coordinates import Angle import matplotlib.pyplot as plt import matplotlib.patches as mpatches x0, y0 = 25, 25 a, b = 20, 10 theta = Angle(30, 'deg') e = Ellipse2D(amplitude=100., x_0=x0, y_0=y0, a=a, b=b, theta=theta.radian) y, x = np.mgrid[0:50, 0:50] fig, ax = plt.subplots(1, 1) ax.imshow(e(x, y), origin='lower', interpolation='none', cmap='Greys_r') e2 = mpatches.Ellipse((x0, y0), 2*a, 2*b, theta.degree, edgecolor='red', facecolor='none') ax.add_patch(e2) plt.show() """ amplitude = Parameter(default=1, description="Value of the ellipse") x_0 = Parameter(default=0, description="X position of the center of the disk.") y_0 = Parameter(default=0, description="Y position of the center of the disk.") a = Parameter(default=1, description="The length of the semimajor axis") b = Parameter(default=1, description="The length of the semiminor axis") theta = Parameter(default=0.0, description=("Rotation angle either as a " "float (in radians) or a " "|Quantity| angle")) @staticmethod def evaluate(x, y, amplitude, x_0, y_0, a, b, theta): """Two dimensional Ellipse model function.""" xx = x - x_0 yy = y - y_0 cost = np.cos(theta) sint = np.sin(theta) numerator1 = (xx * cost) + (yy * sint) numerator2 = -(xx * sint) + (yy * cost) in_ellipse = (((numerator1 / a) ** 2 + (numerator2 / b) ** 2) <= 1.) result = np.select([in_ellipse], [amplitude]) if isinstance(amplitude, Quantity): return Quantity(result, unit=amplitude.unit, copy=False) return result @property def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits. ``((y_low, y_high), (x_low, x_high))`` """ a = self.a b = self.b theta = self.theta dx, dy = ellipse_extent(a, b, theta) return ((self.y_0 - dy, self.y_0 + dy), (self.x_0 - dx, self.x_0 + dx)) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]: raise UnitsError("Units of 'x' and 'y' inputs should match") return {'x_0': inputs_unit[self.inputs[0]], 'y_0': inputs_unit[self.inputs[0]], 'a': inputs_unit[self.inputs[0]], 'b': inputs_unit[self.inputs[0]], 'theta': u.rad, 'amplitude': outputs_unit[self.outputs[0]]} class Disk2D(Fittable2DModel): """ Two dimensional radial symmetric Disk model. Parameters ---------- amplitude : float Value of the disk function x_0 : float x position center of the disk y_0 : float y position center of the disk R_0 : float Radius of the disk See Also -------- Box2D, TrapezoidDisk2D Notes ----- Model formula: .. math:: f(r) = \\left \\{ \\begin{array}{ll} A & : r \\leq R_0 \\\\ 0 & : r > R_0 \\end{array} \\right. 
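
    Examples
    --------
    A minimal evaluation sketch (parameter and grid values here are
    illustrative, not library output)::

        import numpy as np
        from astropy.modeling.models import Disk2D

        disk = Disk2D(amplitude=10, x_0=0, y_0=0, R_0=2)
        y, x = np.mgrid[-5:5:0.1, -5:5:0.1]
        img = disk(x, y)  # equals 10 where r <= 2, and 0 elsewhere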
""" amplitude = Parameter(default=1, description="Value of disk function") x_0 = Parameter(default=0, description="X position of center of the disk") y_0 = Parameter(default=0, description="Y position of center of the disk") R_0 = Parameter(default=1, description="Radius of the disk") @staticmethod def evaluate(x, y, amplitude, x_0, y_0, R_0): """Two dimensional Disk model function""" rr = (x - x_0) ** 2 + (y - y_0) ** 2 result = np.select([rr <= R_0 ** 2], [amplitude]) if isinstance(amplitude, Quantity): return Quantity(result, unit=amplitude.unit, copy=False) return result @property def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits. ``((y_low, y_high), (x_low, x_high))`` """ return ((self.y_0 - self.R_0, self.y_0 + self.R_0), (self.x_0 - self.R_0, self.x_0 + self.R_0)) @property def input_units(self): if self.x_0.unit is None and self.y_0.unit is None: return None return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]: raise UnitsError("Units of 'x' and 'y' inputs should match") return {'x_0': inputs_unit[self.inputs[0]], 'y_0': inputs_unit[self.inputs[0]], 'R_0': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class Ring2D(Fittable2DModel): """ Two dimensional radial symmetric Ring model. Parameters ---------- amplitude : float Value of the disk function x_0 : float x position center of the disk y_0 : float y position center of the disk r_in : float Inner radius of the ring width : float Width of the ring. r_out : float Outer Radius of the ring. Can be specified instead of width. See Also -------- Disk2D, TrapezoidDisk2D Notes ----- Model formula: .. math:: f(r) = \\left \\{ \\begin{array}{ll} A & : r_{in} \\leq r \\leq r_{out} \\\\ 0 & : \\text{else} \\end{array} \\right. Where :math:`r_{out} = r_{in} + r_{width}`. 
""" amplitude = Parameter(default=1, description="Value of the disk function") x_0 = Parameter(default=0, description="X position of center of disc") y_0 = Parameter(default=0, description="Y position of center of disc") r_in = Parameter(default=1, description="Inner radius of the ring") width = Parameter(default=1, description="Width of the ring") def __init__(self, amplitude=amplitude.default, x_0=x_0.default, y_0=y_0.default, r_in=None, width=None, r_out=None, **kwargs): if (r_in is None) and (r_out is None) and (width is None): r_in = self.r_in.default width = self.width.default elif (r_in is not None) and (r_out is None) and (width is None): width = self.width.default elif (r_in is None) and (r_out is not None) and (width is None): r_in = self.r_in.default width = r_out - r_in elif (r_in is None) and (r_out is None) and (width is not None): r_in = self.r_in.default elif (r_in is not None) and (r_out is not None) and (width is None): width = r_out - r_in elif (r_in is None) and (r_out is not None) and (width is not None): r_in = r_out - width elif (r_in is not None) and (r_out is not None) and (width is not None): if np.any(width != (r_out - r_in)): raise InputParameterError("Width must be r_out - r_in") if np.any(r_in < 0) or np.any(width < 0): raise InputParameterError(f"{r_in=} and {width=} must both be >=0") super().__init__( amplitude=amplitude, x_0=x_0, y_0=y_0, r_in=r_in, width=width, **kwargs) @staticmethod def evaluate(x, y, amplitude, x_0, y_0, r_in, width): """Two dimensional Ring model function.""" rr = (x - x_0) ** 2 + (y - y_0) ** 2 r_range = np.logical_and(rr >= r_in ** 2, rr <= (r_in + width) ** 2) result = np.select([r_range], [amplitude]) if isinstance(amplitude, Quantity): return Quantity(result, unit=amplitude.unit, copy=False) return result @property def bounding_box(self): """ Tuple defining the default ``bounding_box``. ``((y_low, y_high), (x_low, x_high))`` """ dr = self.r_in + self.width return ((self.y_0 - dr, self.y_0 + dr), (self.x_0 - dr, self.x_0 + dr)) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]: raise UnitsError("Units of 'x' and 'y' inputs should match") return {'x_0': inputs_unit[self.inputs[0]], 'y_0': inputs_unit[self.inputs[0]], 'r_in': inputs_unit[self.inputs[0]], 'width': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class Box1D(Fittable1DModel): """ One dimensional Box model. Parameters ---------- amplitude : float Amplitude A x_0 : float Position of the center of the box function width : float Width of the box See Also -------- Box2D, TrapezoidDisk2D Notes ----- Model formula: .. math:: f(x) = \\left \\{ \\begin{array}{ll} A & : x_0 - w/2 \\leq x \\leq x_0 + w/2 \\\\ 0 & : \\text{else} \\end{array} \\right. Examples -------- .. 
plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Box1D plt.figure() s1 = Box1D() r = np.arange(-5, 5, .01) for factor in range(1, 4): s1.amplitude = factor s1.width = factor plt.plot(r, s1(r), color=str(0.25 * factor), lw=2) plt.axis([-5, 5, -1, 4]) plt.show() """ amplitude = Parameter(default=1, description="Amplitude A") x_0 = Parameter(default=0, description="Position of center of box function") width = Parameter(default=1, description="Width of the box") @staticmethod def evaluate(x, amplitude, x_0, width): """One dimensional Box model function""" inside = np.logical_and(x >= x_0 - width / 2., x <= x_0 + width / 2.) return np.select([inside], [amplitude], 0) @property def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits. ``(x_low, x_high))`` """ dx = self.width / 2 return (self.x_0 - dx, self.x_0 + dx) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} @property def return_units(self): if self.amplitude.unit is None: return None return {self.outputs[0]: self.amplitude.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'x_0': inputs_unit[self.inputs[0]], 'width': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class Box2D(Fittable2DModel): """ Two dimensional Box model. Parameters ---------- amplitude : float Amplitude x_0 : float x position of the center of the box function x_width : float Width in x direction of the box y_0 : float y position of the center of the box function y_width : float Width in y direction of the box See Also -------- Box1D, Gaussian2D, Moffat2D Notes ----- Model formula: .. math:: f(x, y) = \\left \\{ \\begin{array}{ll} A : & x_0 - w_x/2 \\leq x \\leq x_0 + w_x/2 \\text{ and} \\\\ & y_0 - w_y/2 \\leq y \\leq y_0 + w_y/2 \\\\ 0 : & \\text{else} \\end{array} \\right. """ amplitude = Parameter(default=1, description="Amplitude") x_0 = Parameter(default=0, description="X position of the center of the box function") y_0 = Parameter(default=0, description="Y position of the center of the box function") x_width = Parameter(default=1, description="Width in x direction of the box") y_width = Parameter(default=1, description="Width in y direction of the box") @staticmethod def evaluate(x, y, amplitude, x_0, y_0, x_width, y_width): """Two dimensional Box model function""" x_range = np.logical_and(x >= x_0 - x_width / 2., x <= x_0 + x_width / 2.) y_range = np.logical_and(y >= y_0 - y_width / 2., y <= y_0 + y_width / 2.) result = np.select([np.logical_and(x_range, y_range)], [amplitude], 0) if isinstance(amplitude, Quantity): return Quantity(result, unit=amplitude.unit, copy=False) return result @property def bounding_box(self): """ Tuple defining the default ``bounding_box``. ``((y_low, y_high), (x_low, x_high))`` """ dx = self.x_width / 2 dy = self.y_width / 2 return ((self.y_0 - dy, self.y_0 + dy), (self.x_0 - dx, self.x_0 + dx)) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'x_0': inputs_unit[self.inputs[0]], 'y_0': inputs_unit[self.inputs[1]], 'x_width': inputs_unit[self.inputs[0]], 'y_width': inputs_unit[self.inputs[1]], 'amplitude': outputs_unit[self.outputs[0]]} class Trapezoid1D(Fittable1DModel): """ One dimensional Trapezoid model. 
Parameters ---------- amplitude : float Amplitude of the trapezoid x_0 : float Center position of the trapezoid width : float Width of the constant part of the trapezoid. slope : float Slope of the tails of the trapezoid See Also -------- Box1D, Gaussian1D, Moffat1D Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Trapezoid1D plt.figure() s1 = Trapezoid1D() r = np.arange(-5, 5, .01) for factor in range(1, 4): s1.amplitude = factor s1.width = factor plt.plot(r, s1(r), color=str(0.25 * factor), lw=2) plt.axis([-5, 5, -1, 4]) plt.show() """ amplitude = Parameter(default=1, description="Amplitude of the trapezoid") x_0 = Parameter(default=0, description="Center position of the trapezoid") width = Parameter(default=1, description="Width of constant part of the trapezoid") slope = Parameter(default=1, description="Slope of the tails of trapezoid") @staticmethod def evaluate(x, amplitude, x_0, width, slope): """One dimensional Trapezoid model function""" # Compute the four points where the trapezoid changes slope # x1 <= x2 <= x3 <= x4 x2 = x_0 - width / 2. x3 = x_0 + width / 2. x1 = x2 - amplitude / slope x4 = x3 + amplitude / slope # Compute model values in pieces between the change points range_a = np.logical_and(x >= x1, x < x2) range_b = np.logical_and(x >= x2, x < x3) range_c = np.logical_and(x >= x3, x < x4) val_a = slope * (x - x1) val_b = amplitude val_c = slope * (x4 - x) result = np.select([range_a, range_b, range_c], [val_a, val_b, val_c]) if isinstance(amplitude, Quantity): return Quantity(result, unit=amplitude.unit, copy=False) return result @property def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits. ``(x_low, x_high))`` """ dx = self.width / 2 + self.amplitude / self.slope return (self.x_0 - dx, self.x_0 + dx) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'x_0': inputs_unit[self.inputs[0]], 'width': inputs_unit[self.inputs[0]], 'slope': outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class TrapezoidDisk2D(Fittable2DModel): """ Two dimensional circular Trapezoid model. Parameters ---------- amplitude : float Amplitude of the trapezoid x_0 : float x position of the center of the trapezoid y_0 : float y position of the center of the trapezoid R_0 : float Radius of the constant part of the trapezoid. slope : float Slope of the tails of the trapezoid in x direction. 
    See Also
    --------
    Disk2D, Box2D
    """

    amplitude = Parameter(default=1, description="Amplitude of the trapezoid")
    x_0 = Parameter(default=0, description="X position of the center of the trapezoid")
    y_0 = Parameter(default=0, description="Y position of the center of the trapezoid")
    R_0 = Parameter(default=1, description="Radius of constant part of trapezoid")
    slope = Parameter(default=1, description="Slope of tails of trapezoid in x direction")

    @staticmethod
    def evaluate(x, y, amplitude, x_0, y_0, R_0, slope):
        """Two dimensional Trapezoid Disk model function"""
        r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2)
        range_1 = r <= R_0
        range_2 = np.logical_and(r > R_0, r <= R_0 + amplitude / slope)
        val_1 = amplitude
        val_2 = amplitude + slope * (R_0 - r)
        result = np.select([range_1, range_2], [val_1, val_2])

        if isinstance(amplitude, Quantity):
            return Quantity(result, unit=amplitude.unit, copy=False)
        return result

    @property
    def bounding_box(self):
        """
        Tuple defining the default ``bounding_box``.

        ``((y_low, y_high), (x_low, x_high))``
        """
        dr = self.R_0 + self.amplitude / self.slope

        return ((self.y_0 - dr, self.y_0 + dr),
                (self.x_0 - dr, self.x_0 + dr))

    @property
    def input_units(self):
        if self.x_0.unit is None and self.y_0.unit is None:
            return None
        return {self.inputs[0]: self.x_0.unit,
                self.inputs[1]: self.y_0.unit}

    def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
        # Note that here we need to make sure that x and y are in the same
        # units otherwise this can lead to issues since rotation is not well
        # defined.
        if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
            raise UnitsError("Units of 'x' and 'y' inputs should match")
        return {'x_0': inputs_unit[self.inputs[0]],
                'y_0': inputs_unit[self.inputs[0]],
                'R_0': inputs_unit[self.inputs[0]],
                'slope': outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]],
                'amplitude': outputs_unit[self.outputs[0]]}


class RickerWavelet1D(Fittable1DModel):
    """
    One dimensional Ricker Wavelet model (sometimes known as a "Mexican Hat"
    model).

    .. note::

        See https://github.com/astropy/astropy/pull/9445 for discussions
        related to renaming of this model.

    Parameters
    ----------
    amplitude : float
        Amplitude
    x_0 : float
        Position of the peak
    sigma : float
        Width of the Ricker wavelet

    See Also
    --------
    RickerWavelet2D, Box1D, Gaussian1D, Trapezoid1D

    Notes
    -----
    Model formula:

    .. math::

        f(x) = {A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}}{\\sigma^{2}}\\right)
        e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}}

    Examples
    --------
    .. plot::
        :include-source:

        import numpy as np
        import matplotlib.pyplot as plt

        from astropy.modeling.models import RickerWavelet1D

        plt.figure()
        s1 = RickerWavelet1D()
        r = np.arange(-5, 5, .01)

        for factor in range(1, 4):
            s1.amplitude = factor
            s1.sigma = factor
            plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)

        plt.axis([-5, 5, -2, 4])
        plt.show()
    """

    amplitude = Parameter(default=1, description="Amplitude (peak) value")
    x_0 = Parameter(default=0, description="Position of the peak")
    sigma = Parameter(default=1, description="Width of the Ricker wavelet")

    @staticmethod
    def evaluate(x, amplitude, x_0, sigma):
        """One dimensional Ricker Wavelet model function"""
        xx_ww = (x - x_0) ** 2 / (2 * sigma ** 2)
        return amplitude * (1 - 2 * xx_ww) * np.exp(-xx_ww)

    def bounding_box(self, factor=10.0):
        """Tuple defining the default ``bounding_box`` limits,
        ``(x_low, x_high)``.

        Parameters
        ----------
        factor : float
            The multiple of sigma used to define the limits.
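
        Examples
        --------
        A sketch using the defaults ``x_0 = 0`` and ``sigma = 1`` (the output
        is shown as a comment because it is illustrative)::

            from astropy.modeling.models import RickerWavelet1D
            RickerWavelet1D().bounding_box(factor=2)  # approximately (-2.0, 2.0)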
""" x0 = self.x_0 dx = factor * self.sigma return (x0 - dx, x0 + dx) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'x_0': inputs_unit[self.inputs[0]], 'sigma': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class RickerWavelet2D(Fittable2DModel): """ Two dimensional Ricker Wavelet model (sometimes known as a "Mexican Hat" model). .. note:: See https://github.com/astropy/astropy/pull/9445 for discussions related to renaming of this model. Parameters ---------- amplitude : float Amplitude x_0 : float x position of the peak y_0 : float y position of the peak sigma : float Width of the Ricker wavelet See Also -------- RickerWavelet1D, Gaussian2D Notes ----- Model formula: .. math:: f(x, y) = A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2} + \\left(y - y_{0}\\right)^{2}}{\\sigma^{2}}\\right) e^{\\frac{- \\left(x - x_{0}\\right)^{2} - \\left(y - y_{0}\\right)^{2}}{2 \\sigma^{2}}} """ amplitude = Parameter(default=1, description="Amplitude (peak) value") x_0 = Parameter(default=0, description="X position of the peak") y_0 = Parameter(default=0, description="Y position of the peak") sigma = Parameter(default=1, description="Width of the Ricker wavelet") @staticmethod def evaluate(x, y, amplitude, x_0, y_0, sigma): """Two dimensional Ricker Wavelet model function""" rr_ww = ((x - x_0) ** 2 + (y - y_0) ** 2) / (2 * sigma ** 2) return amplitude * (1 - rr_ww) * np.exp(- rr_ww) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]: raise UnitsError("Units of 'x' and 'y' inputs should match") return {'x_0': inputs_unit[self.inputs[0]], 'y_0': inputs_unit[self.inputs[0]], 'sigma': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class AiryDisk2D(Fittable2DModel): """ Two dimensional Airy disk model. Parameters ---------- amplitude : float Amplitude of the Airy function. x_0 : float x position of the maximum of the Airy function. y_0 : float y position of the maximum of the Airy function. radius : float The radius of the Airy disk (radius of the first zero). See Also -------- Box2D, TrapezoidDisk2D, Gaussian2D Notes ----- Model formula: .. math:: f(r) = A \\left[ \\frac{2 J_1(\\frac{\\pi r}{R/R_z})}{\\frac{\\pi r}{R/R_z}} \\right]^2 Where :math:`J_1` is the first order Bessel function of the first kind, :math:`r` is radial distance from the maximum of the Airy function (:math:`r = \\sqrt{(x - x_0)^2 + (y - y_0)^2}`), :math:`R` is the input ``radius`` parameter, and :math:`R_z = 1.2196698912665045`). For an optical system, the radius of the first zero represents the limiting angular resolution and is approximately 1.22 * lambda / D, where lambda is the wavelength of the light and D is the diameter of the aperture. See [1]_ for more details about the Airy disk. References ---------- .. 
    amplitude = Parameter(default=1,
                          description="Amplitude (peak value) of the Airy function")
    x_0 = Parameter(default=0, description="X position of the peak")
    y_0 = Parameter(default=0, description="Y position of the peak")
    radius = Parameter(default=1,
                       description="The radius of the Airy disk (radius of first zero crossing)")
    _rz = None
    _j1 = None

    @classmethod
    def evaluate(cls, x, y, amplitude, x_0, y_0, radius):
        """Two dimensional Airy model function"""
        if cls._rz is None:
            from scipy.special import j1, jn_zeros
            cls._rz = jn_zeros(1, 1)[0] / np.pi
            cls._j1 = j1

        r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2) / (radius / cls._rz)

        if isinstance(r, Quantity):
            # scipy function cannot handle Quantity, so turn into array.
            r = r.to_value(u.dimensionless_unscaled)

        # Since r can be zero, we have to take care to treat that case
        # separately so as not to raise a numpy warning
        z = np.ones(r.shape)
        rt = np.pi * r[r > 0]
        z[r > 0] = (2.0 * cls._j1(rt) / rt) ** 2

        if isinstance(amplitude, Quantity):
            # make z quantity too, otherwise in-place multiplication fails.
            z = Quantity(z, u.dimensionless_unscaled, copy=False)

        z *= amplitude
        return z

    @property
    def input_units(self):
        if self.x_0.unit is None:
            return None
        return {self.inputs[0]: self.x_0.unit,
                self.inputs[1]: self.y_0.unit}

    def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
        # Note that here we need to make sure that x and y are in the same
        # units otherwise this can lead to issues since rotation is not well
        # defined.
        if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
            raise UnitsError("Units of 'x' and 'y' inputs should match")
        return {'x_0': inputs_unit[self.inputs[0]],
                'y_0': inputs_unit[self.inputs[0]],
                'radius': inputs_unit[self.inputs[0]],
                'amplitude': outputs_unit[self.outputs[0]]}


class Moffat1D(Fittable1DModel):
    """
    One dimensional Moffat model.

    Parameters
    ----------
    amplitude : float
        Amplitude of the model.
    x_0 : float
        x position of the maximum of the Moffat model.
    gamma : float
        Core width of the Moffat model.
    alpha : float
        Power index of the Moffat model.

    See Also
    --------
    Gaussian1D, Box1D

    Notes
    -----
    Model formula:

    .. math::

        f(x) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha}

    Examples
    --------
    .. plot::
        :include-source:

        import numpy as np
        import matplotlib.pyplot as plt

        from astropy.modeling.models import Moffat1D

        plt.figure()
        s1 = Moffat1D()
        r = np.arange(-5, 5, .01)

        for factor in range(1, 4):
            s1.amplitude = factor
            s1.gamma = factor
            plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)

        plt.axis([-5, 5, -1, 4])
        plt.show()
    """

    amplitude = Parameter(default=1, description="Amplitude of the model")
    x_0 = Parameter(default=0, description="X position of maximum of Moffat model")
    gamma = Parameter(default=1, description="Core width of Moffat model")
    alpha = Parameter(default=1, description="Power index of the Moffat model")

    @property
    def fwhm(self):
        """
        Moffat full width at half maximum.
        Derivation of the formula is available in
        `this notebook by Yoonsoo Bach
        <https://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_.
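
        The resulting relation, matching the expression returned in the code, is

        .. math:: \\mathrm{FWHM} = 2 \\left|\\gamma\\right| \\sqrt{2^{1 / \\alpha} - 1}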
""" return 2.0 * np.abs(self.gamma) * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0) @staticmethod def evaluate(x, amplitude, x_0, gamma, alpha): """One dimensional Moffat model function""" return amplitude * (1 + ((x - x_0) / gamma) ** 2) ** (-alpha) @staticmethod def fit_deriv(x, amplitude, x_0, gamma, alpha): """One dimensional Moffat model derivative with respect to parameters""" fac = (1 + (x - x_0) ** 2 / gamma ** 2) d_A = fac ** (-alpha) d_x_0 = (2 * amplitude * alpha * (x - x_0) * d_A / (fac * gamma ** 2)) d_gamma = (2 * amplitude * alpha * (x - x_0) ** 2 * d_A / (fac * gamma ** 3)) d_alpha = -amplitude * d_A * np.log(fac) return [d_A, d_x_0, d_gamma, d_alpha] @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'x_0': inputs_unit[self.inputs[0]], 'gamma': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class Moffat2D(Fittable2DModel): """ Two dimensional Moffat model. Parameters ---------- amplitude : float Amplitude of the model. x_0 : float x position of the maximum of the Moffat model. y_0 : float y position of the maximum of the Moffat model. gamma : float Core width of the Moffat model. alpha : float Power index of the Moffat model. See Also -------- Gaussian2D, Box2D Notes ----- Model formula: .. math:: f(x, y) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2} + \\left(y - y_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha} """ amplitude = Parameter(default=1, description="Amplitude (peak value) of the model") x_0 = Parameter(default=0, description="X position of the maximum of the Moffat model") y_0 = Parameter(default=0, description="Y position of the maximum of the Moffat model") gamma = Parameter(default=1, description="Core width of the Moffat model") alpha = Parameter(default=1, description="Power index of the Moffat model") @property def fwhm(self): """ Moffat full width at half maximum. Derivation of the formula is available in `this notebook by Yoonsoo Bach <https://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_. """ return 2.0 * np.abs(self.gamma) * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0) @staticmethod def evaluate(x, y, amplitude, x_0, y_0, gamma, alpha): """Two dimensional Moffat model function""" rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2 return amplitude * (1 + rr_gg) ** (-alpha) @staticmethod def fit_deriv(x, y, amplitude, x_0, y_0, gamma, alpha): """Two dimensional Moffat model derivative with respect to parameters""" rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2 d_A = (1 + rr_gg) ** (-alpha) d_x_0 = (2 * amplitude * alpha * d_A * (x - x_0) / (gamma ** 2 * (1 + rr_gg))) d_y_0 = (2 * amplitude * alpha * d_A * (y - y_0) / (gamma ** 2 * (1 + rr_gg))) d_alpha = -amplitude * d_A * np.log(1 + rr_gg) d_gamma = (2 * amplitude * alpha * d_A * rr_gg / (gamma * (1 + rr_gg))) return [d_A, d_x_0, d_y_0, d_gamma, d_alpha] @property def input_units(self): if self.x_0.unit is None: return None else: return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. 
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]: raise UnitsError("Units of 'x' and 'y' inputs should match") return {'x_0': inputs_unit[self.inputs[0]], 'y_0': inputs_unit[self.inputs[0]], 'gamma': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class Sersic2D(Fittable2DModel): r""" Two dimensional Sersic surface brightness profile. Parameters ---------- amplitude : float Surface brightness at r_eff. r_eff : float Effective (half-light) radius n : float Sersic Index. x_0 : float, optional x position of the center. y_0 : float, optional y position of the center. ellip : float, optional Ellipticity. theta : float or `~astropy.units.Quantity`, optional The rotation angle as an angular quantity (`~astropy.units.Quantity` or `~astropy.coordinates.Angle`) or a value in radians (as a float). The rotation angle increases counterclockwise from the positive x axis. See Also -------- Gaussian2D, Moffat2D Notes ----- Model formula: .. math:: I(x,y) = I(r) = I_e\exp\left\{ -b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right] \right\} The constant :math:`b_n` is defined such that :math:`r_e` contains half the total luminosity, and can be solved for numerically. .. math:: \Gamma(2n) = 2\gamma (2n,b_n) Examples -------- .. plot:: :include-source: import numpy as np from astropy.modeling.models import Sersic2D import matplotlib.pyplot as plt x,y = np.meshgrid(np.arange(100), np.arange(100)) mod = Sersic2D(amplitude = 1, r_eff = 25, n=4, x_0=50, y_0=50, ellip=.5, theta=-1) img = mod(x, y) log_img = np.log10(img) plt.figure() plt.imshow(log_img, origin='lower', interpolation='nearest', vmin=-1, vmax=2) plt.xlabel('x') plt.ylabel('y') cbar = plt.colorbar() cbar.set_label('Log Brightness', rotation=270, labelpad=25) cbar.set_ticks([-1, 0, 1, 2], update_ticks=True) plt.show() References ---------- .. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html """ amplitude = Parameter(default=1, description="Surface brightness at r_eff") r_eff = Parameter(default=1, description="Effective (half-light) radius") n = Parameter(default=4, description="Sersic Index") x_0 = Parameter(default=0, description="X position of the center") y_0 = Parameter(default=0, description="Y position of the center") ellip = Parameter(default=0, description="Ellipticity") theta = Parameter(default=0.0, description=("Rotation angle either as a " "float (in radians) or a " "|Quantity| angle")) _gammaincinv = None @classmethod def evaluate(cls, x, y, amplitude, r_eff, n, x_0, y_0, ellip, theta): """Two dimensional Sersic profile function.""" if cls._gammaincinv is None: from scipy.special import gammaincinv cls._gammaincinv = gammaincinv bn = cls._gammaincinv(2. * n, 0.5) a, b = r_eff, (1 - ellip) * r_eff cos_theta, sin_theta = np.cos(theta), np.sin(theta) x_maj = (x - x_0) * cos_theta + (y - y_0) * sin_theta x_min = -(x - x_0) * sin_theta + (y - y_0) * cos_theta z = np.sqrt((x_maj / a) ** 2 + (x_min / b) ** 2) return amplitude * np.exp(-bn * (z ** (1 / n) - 1)) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. 
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]: raise UnitsError("Units of 'x' and 'y' inputs should match") return {'x_0': inputs_unit[self.inputs[0]], 'y_0': inputs_unit[self.inputs[0]], 'r_eff': inputs_unit[self.inputs[0]], 'theta': u.rad, 'amplitude': outputs_unit[self.outputs[0]]} class KingProjectedAnalytic1D(Fittable1DModel): """ Projected (surface density) analytic King Model. Parameters ---------- amplitude : float Amplitude or scaling factor. r_core : float Core radius (f(r_c) ~ 0.5 f_0) r_tide : float Tidal radius. Notes ----- This model approximates a King model with an analytic function. The derivation of this equation can be found in King '62 (equation 14). This is just an approximation of the full model and the parameters derived from this model should be taken with caution. It usually works for models with a concentration (c = log10(r_t/r_c) parameter < 2. Model formula: .. math:: f(x) = A r_c^2 \\left(\\frac{1}{\\sqrt{(x^2 + r_c^2)}} - \\frac{1}{\\sqrt{(r_t^2 + r_c^2)}}\\right)^2 Examples -------- .. plot:: :include-source: import numpy as np from astropy.modeling.models import KingProjectedAnalytic1D import matplotlib.pyplot as plt plt.figure() rt_list = [1, 2, 5, 10, 20] for rt in rt_list: r = np.linspace(0.1, rt, 100) mod = KingProjectedAnalytic1D(amplitude = 1, r_core = 1., r_tide = rt) sig = mod(r) plt.loglog(r, sig/sig[0], label=f"c ~ {mod.concentration:0.2f}") plt.xlabel("r") plt.ylabel(r"$\\sigma/\\sigma_0$") plt.legend() plt.show() References ---------- .. [1] https://ui.adsabs.harvard.edu/abs/1962AJ.....67..471K """ amplitude = Parameter(default=1, bounds=(FLOAT_EPSILON, None), description="Amplitude or scaling factor") r_core = Parameter(default=1, bounds=(FLOAT_EPSILON, None), description="Core Radius") r_tide = Parameter(default=2, bounds=(FLOAT_EPSILON, None), description="Tidal Radius") @property def concentration(self): """Concentration parameter of the king model""" return np.log10(np.abs(self.r_tide/self.r_core)) @staticmethod def evaluate(x, amplitude, r_core, r_tide): """ Analytic King model function. """ result = amplitude * r_core ** 2 * (1/np.sqrt(x ** 2 + r_core ** 2) - 1/np.sqrt(r_tide ** 2 + r_core ** 2)) ** 2 # Set invalid r values to 0 bounds = (x >= r_tide) | (x < 0) result[bounds] = result[bounds] * 0. return result @staticmethod def fit_deriv(x, amplitude, r_core, r_tide): """ Analytic King model function derivatives. """ d_amplitude = r_core ** 2 * (1/np.sqrt(x ** 2 + r_core ** 2) - 1/np.sqrt(r_tide ** 2 + r_core ** 2)) ** 2 d_r_core = 2 * amplitude * r_core ** 2 * (r_core/(r_core ** 2 + r_tide ** 2) ** (3/2) - r_core/(r_core ** 2 + x ** 2) ** (3/2)) * \ (1./np.sqrt(r_core ** 2 + x ** 2) - 1./np.sqrt(r_core ** 2 + r_tide ** 2)) + \ 2 * amplitude * r_core * (1./np.sqrt(r_core ** 2 + x ** 2) - 1./np.sqrt(r_core ** 2 + r_tide ** 2)) ** 2 d_r_tide = (2 * amplitude * r_core ** 2 * r_tide * (1./np.sqrt(r_core ** 2 + x ** 2) - 1./np.sqrt(r_core ** 2 + r_tide ** 2)))/(r_core ** 2 + r_tide ** 2) ** (3/2) # Set invalid r values to 0 bounds = (x >= r_tide) | (x < 0) d_amplitude[bounds] = d_amplitude[bounds]*0 d_r_core[bounds] = d_r_core[bounds]*0 d_r_tide[bounds] = d_r_tide[bounds]*0 return [d_amplitude, d_r_core, d_r_tide] @property def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits. The model is not defined for r > r_tide. 
``(r_low, r_high)`` """ return (0 * self.r_tide, 1 * self.r_tide) @property def input_units(self): if self.r_core.unit is None: return None return {self.inputs[0]: self.r_core.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'r_core': inputs_unit[self.inputs[0]], 'r_tide': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class Logarithmic1D(Fittable1DModel): """ One dimensional logarithmic model. Parameters ---------- amplitude : float, optional tau : float, optional See Also -------- Exponential1D, Gaussian1D """ amplitude = Parameter(default=1) tau = Parameter(default=1) @staticmethod def evaluate(x, amplitude, tau): return amplitude * np.log(x / tau) @staticmethod def fit_deriv(x, amplitude, tau): d_amplitude = np.log(x / tau) d_tau = np.zeros(x.shape) - (amplitude / tau) return [d_amplitude, d_tau] @property def inverse(self): new_amplitude = self.tau new_tau = self.amplitude return Exponential1D(amplitude=new_amplitude, tau=new_tau) @tau.validator def tau(self, val): if np.all(val == 0): raise ValueError("0 is not an allowed value for tau") @property def input_units(self): if self.tau.unit is None: return None return {self.inputs[0]: self.tau.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'tau': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class Exponential1D(Fittable1DModel): """ One dimensional exponential model. Parameters ---------- amplitude : float, optional tau : float, optional See Also -------- Logarithmic1D, Gaussian1D """ amplitude = Parameter(default=1) tau = Parameter(default=1) @staticmethod def evaluate(x, amplitude, tau): return amplitude * np.exp(x / tau) @staticmethod def fit_deriv(x, amplitude, tau): ''' Derivative with respect to parameters''' d_amplitude = np.exp(x / tau) d_tau = -amplitude * (x / tau**2) * np.exp(x / tau) return [d_amplitude, d_tau] @property def inverse(self): new_amplitude = self.tau new_tau = self.amplitude return Logarithmic1D(amplitude=new_amplitude, tau=new_tau) @tau.validator def tau(self, val): ''' tau cannot be 0''' if np.all(val == 0): raise ValueError("0 is not an allowed value for tau") @property def input_units(self): if self.tau.unit is None: return None return {self.inputs[0]: self.tau.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'tau': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]}
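

# A minimal usage sketch (not part of the library API): it demonstrates the
# round-trip behaviour implied by the ``inverse`` properties of
# ``Exponential1D`` and ``Logarithmic1D`` above. Parameter values are
# illustrative.
if __name__ == '__main__':
    import numpy as np

    exp_model = Exponential1D(amplitude=2.0, tau=3.0)
    log_model = exp_model.inverse  # Logarithmic1D(amplitude=3.0, tau=2.0)

    x = np.linspace(0.5, 5.0, 10)
    # log_model(exp_model(x)) == 3 * log(2 * exp(x / 3) / 2) == x
    assert np.allclose(log_model(exp_model(x)), x)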
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
"""
Implements projections--particularly sky projections defined in WCS Paper II [1]_.

All angles are set and displayed in degrees, but internally the
computations are performed in radians. All functions expect inputs in
degrees and produce outputs in degrees.

References
----------
.. [1] Calabretta, M.R., Greisen, E.W., 2002, A&A, 395, 1077 (Paper II)
"""

import abc
from itertools import chain, product

import numpy as np

from astropy import units as u
from astropy import wcs

from .core import Model
from .parameters import InputParameterError, Parameter
from .utils import _to_orig_unit, _to_radian

# List of tuples of the form
# (long class name without suffix, short WCSLIB projection code):
_PROJ_NAME_CODE = [
    ('ZenithalPerspective', 'AZP'),
    ('SlantZenithalPerspective', 'SZP'),
    ('Gnomonic', 'TAN'),
    ('Stereographic', 'STG'),
    ('SlantOrthographic', 'SIN'),
    ('ZenithalEquidistant', 'ARC'),
    ('ZenithalEqualArea', 'ZEA'),
    ('Airy', 'AIR'),
    ('CylindricalPerspective', 'CYP'),
    ('CylindricalEqualArea', 'CEA'),
    ('PlateCarree', 'CAR'),
    ('Mercator', 'MER'),
    ('SansonFlamsteed', 'SFL'),
    ('Parabolic', 'PAR'),
    ('Molleweide', 'MOL'),
    ('HammerAitoff', 'AIT'),
    ('ConicPerspective', 'COP'),
    ('ConicEqualArea', 'COE'),
    ('ConicEquidistant', 'COD'),
    ('ConicOrthomorphic', 'COO'),
    ('BonneEqualArea', 'BON'),
    ('Polyconic', 'PCO'),
    ('TangentialSphericalCube', 'TSC'),
    ('COBEQuadSphericalCube', 'CSC'),
    ('QuadSphericalCube', 'QSC'),
    ('HEALPix', 'HPX'),
    ('HEALPixPolar', 'XPH'),
]

_NOT_SUPPORTED_PROJ_CODES = ['ZPN']

_PROJ_NAME_CODE_MAP = dict(_PROJ_NAME_CODE)

projcodes = [code for _, code in _PROJ_NAME_CODE]

__all__ = [
    'Projection', 'Pix2SkyProjection', 'Sky2PixProjection',
    'Zenithal', 'Cylindrical', 'PseudoCylindrical', 'Conic',
    'PseudoConic', 'QuadCube', 'HEALPix',
    'AffineTransformation2D', 'projcodes'
] + list(map('_'.join, product(['Pix2Sky', 'Sky2Pix'], chain(*_PROJ_NAME_CODE))))


class _ParameterDS(Parameter):
    """
    Same as `Parameter` but can indicate its modified status via the
    ``dirty`` property. This flag also gets set automatically when a
    parameter is modified.

    This ability to track a parameter's modified status is needed for the
    automatic update of WCSLIB's prjprm structure (which may be a
    time-intensive operation) *only as required*.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.dirty = True

    def validate(self, value):
        super().validate(value)
        self.dirty = True


class Projection(Model):
    """Base class for all sky projections."""

    # Radius of the generating sphere.
    # This sets the circumference to 360 deg so that arc length is measured in deg.
    r0 = 180 * u.deg / np.pi

    _separable = False

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._prj = wcs.Prjprm()

    @property
    @abc.abstractmethod
    def inverse(self):
        """
        Inverse projection--all projection models must provide an inverse.
        """

    @property
    def prjprm(self):
        """ WCSLIB ``prjprm`` structure. """
        self._update_prj()
        return self._prj

    def _update_prj(self):
        """
        A default updater for projection's pv.

        .. warning::
            This method assumes that PV0 is never modified. If a projection
            that uses PV0 is ever implemented in this module, that
            projection class should override this method.

        .. warning::
            This method assumes that the order in which PVi values (i>0)
            are to be assigned is identical to the order of model parameters
            in ``param_names``. That is, pv[1] = model.parameters[0], ...
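
        For example, for a projection with two parameters ``mu`` and
        ``gamma`` (in that order), the assignment performed below is
        effectively ``self._prj.pv = (None, mu_value, gamma_value)``;
        this is a sketch, in which PV0 is deliberately left as ``None``.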
""" if not self.param_names: return pv = [] dirty = False for p in self.param_names: param = getattr(self, p) pv.append(float(param.value)) dirty |= param.dirty param.dirty = False if dirty: self._prj.pv = None, *pv self._prj.set() class Pix2SkyProjection(Projection): """Base class for all Pix2Sky projections.""" n_inputs = 2 n_outputs = 2 _input_units_strict = True _input_units_allow_dimensionless = True def __new__(cls, *args, **kwargs): long_name = cls.name.split('_')[1] cls.prj_code = _PROJ_NAME_CODE_MAP[long_name] return super(Pix2SkyProjection, cls).__new__(cls) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._prj.code = self.prj_code self._update_prj() if not self.param_names: # force initial call to Prjprm.set() for projections # with no parameters: self._prj.set() self.inputs = ('x', 'y') self.outputs = ('phi', 'theta') @property def input_units(self): return {self.inputs[0]: u.deg, self.inputs[1]: u.deg} @property def return_units(self): return {self.outputs[0]: u.deg, self.outputs[1]: u.deg} def evaluate(self, x, y, *args, **kwargs): self._update_prj() return self._prj.prjx2s(x, y) @property def inverse(self): pv = [getattr(self, param).value for param in self.param_names] return self._inv_cls(*pv) class Sky2PixProjection(Projection): """Base class for all Sky2Pix projections.""" n_inputs = 2 n_outputs = 2 _input_units_strict = True _input_units_allow_dimensionless = True def __new__(cls, *args, **kwargs): long_name = cls.name.split('_')[1] cls.prj_code = _PROJ_NAME_CODE_MAP[long_name] return super(Sky2PixProjection, cls).__new__(cls) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._prj.code = self.prj_code self._update_prj() if not self.param_names: # force initial call to Prjprm.set() for projections # without parameters: self._prj.set() self.inputs = ('phi', 'theta') self.outputs = ('x', 'y') @property def input_units(self): return {self.inputs[0]: u.deg, self.inputs[1]: u.deg} @property def return_units(self): return {self.outputs[0]: u.deg, self.outputs[1]: u.deg} def evaluate(self, phi, theta, *args, **kwargs): self._update_prj() return self._prj.prjs2x(phi, theta) @property def inverse(self): pv = [getattr(self, param).value for param in self.param_names] return self._inv_cls(*pv) class Zenithal(Projection): r"""Base class for all Zenithal projections. Zenithal (or azimuthal) projections map the sphere directly onto a plane. All zenithal projections are specified by defining the radius as a function of native latitude, :math:`R_\theta`. The pixel-to-sky transformation is defined as: .. math:: \phi &= \arg(-y, x) \\ R_\theta &= \sqrt{x^2 + y^2} and the inverse (sky-to-pixel) is defined as: .. math:: x &= R_\theta \sin \phi \\ y &= R_\theta \cos \phi """ class Pix2Sky_ZenithalPerspective(Pix2SkyProjection, Zenithal): r""" Zenithal perspective projection - pixel to sky. Corresponds to the ``AZP`` projection in FITS WCS. .. math:: \phi &= \arg(-y \cos \gamma, x) \\ \theta &= \left\{\genfrac{}{}{0pt}{}{\psi - \omega}{\psi + \omega + 180^{\circ}}\right. where: .. math:: \psi &= \arg(\rho, 1) \\ \omega &= \sin^{-1}\left(\frac{\rho \mu}{\sqrt{\rho^2 + 1}}\right) \\ \rho &= \frac{R}{\frac{180^{\circ}}{\pi}(\mu + 1) + y \sin \gamma} \\ R &= \sqrt{x^2 + y^2 \cos^2 \gamma} Parameters ---------- mu : float Distance from point of projection to center of sphere in spherical radii, μ. Default is 0. gamma : float Look angle γ in degrees. Default is 0°. 
""" mu = _ParameterDS( default=0.0, description="Distance from point of projection to center of sphere" ) gamma = _ParameterDS(default=0.0, getter=_to_orig_unit, setter=_to_radian, description="Look angle γ in degrees (Default = 0°)") @mu.validator def mu(self, value): if np.any(np.equal(value, -1.0)): raise InputParameterError( "Zenithal perspective projection is not defined for mu = -1") class Sky2Pix_ZenithalPerspective(Sky2PixProjection, Zenithal): r""" Zenithal perspective projection - sky to pixel. Corresponds to the ``AZP`` projection in FITS WCS. .. math:: x &= R \sin \phi \\ y &= -R \sec \gamma \cos \theta where: .. math:: R = \frac{180^{\circ}}{\pi} \frac{(\mu + 1) \cos \theta} {(\mu + \sin \theta) + \cos \theta \cos \phi \tan \gamma} Parameters ---------- mu : float Distance from point of projection to center of sphere in spherical radii, μ. Default is 0. gamma : float Look angle γ in degrees. Default is 0°. """ mu = _ParameterDS( default=0.0, description="Distance from point of projection to center of sphere" ) gamma = _ParameterDS(default=0.0, getter=_to_orig_unit, setter=_to_radian, description="Look angle γ in degrees (Default=0°)") @mu.validator def mu(self, value): if np.any(np.equal(value, -1.0)): raise InputParameterError( "Zenithal perspective projection is not defined for mu = -1") class Pix2Sky_SlantZenithalPerspective(Pix2SkyProjection, Zenithal): r""" Slant zenithal perspective projection - pixel to sky. Corresponds to the ``SZP`` projection in FITS WCS. Parameters ---------- mu : float Distance from point of projection to center of sphere in spherical radii, μ. Default is 0. phi0 : float The longitude φ₀ of the reference point, in degrees. Default is 0°. theta0 : float The latitude θ₀ of the reference point, in degrees. Default is 90°. """ mu = _ParameterDS( default=0.0, description="Distance from point of projection to center of sphere" ) phi0 = _ParameterDS( default=0.0, getter=_to_orig_unit, setter=_to_radian, description="The longitude φ₀ of the reference point in degrees (Default=0°)" ) theta0 = _ParameterDS( default=90.0, getter=_to_orig_unit, setter=_to_radian, description="The latitude θ₀ of the reference point, in degrees (Default=0°)" ) @mu.validator def mu(self, value): if np.any(np.equal(value, -1.0)): raise InputParameterError( "Zenithal perspective projection is not defined for mu = -1") class Sky2Pix_SlantZenithalPerspective(Sky2PixProjection, Zenithal): r""" Zenithal perspective projection - sky to pixel. Corresponds to the ``SZP`` projection in FITS WCS. Parameters ---------- mu : float distance from point of projection to center of sphere in spherical radii, μ. Default is 0. phi0 : float The longitude φ₀ of the reference point, in degrees. Default is 0°. theta0 : float The latitude θ₀ of the reference point, in degrees. Default is 90°. """ mu = _ParameterDS( default=0.0, description="Distance from point of projection to center of sphere" ) phi0 = _ParameterDS( default=0.0, getter=_to_orig_unit, setter=_to_radian, description="The longitude φ₀ of the reference point in degrees" ) theta0 = _ParameterDS( default=0.0, getter=_to_orig_unit, setter=_to_radian, description="The latitude θ₀ of the reference point, in degrees" ) @mu.validator def mu(self, value): if np.any(np.equal(value, -1.0)): raise InputParameterError( "Zenithal perspective projection is not defined for mu = -1") class Pix2Sky_Gnomonic(Pix2SkyProjection, Zenithal): r""" Gnomonic projection - pixel to sky. Corresponds to the ``TAN`` projection in FITS WCS. 
    See `Zenithal` for a definition of the full transformation.

    .. math::
        \theta = \tan^{-1}\left(\frac{180^{\circ}}{\pi R_\theta}\right)
    """


class Sky2Pix_Gnomonic(Sky2PixProjection, Zenithal):
    r"""
    Gnomonic Projection - sky to pixel.

    Corresponds to the ``TAN`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    .. math::
        R_\theta = \frac{180^{\circ}}{\pi}\cot \theta
    """


class Pix2Sky_Stereographic(Pix2SkyProjection, Zenithal):
    r"""
    Stereographic Projection - pixel to sky.

    Corresponds to the ``STG`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    .. math::
        \theta = 90^{\circ} - 2 \tan^{-1}\left(\frac{\pi R_\theta}{360^{\circ}}\right)
    """


class Sky2Pix_Stereographic(Sky2PixProjection, Zenithal):
    r"""
    Stereographic Projection - sky to pixel.

    Corresponds to the ``STG`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    .. math::
        R_\theta = \frac{180^{\circ}}{\pi}\frac{2 \cos \theta}{1 + \sin \theta}
    """


class Pix2Sky_SlantOrthographic(Pix2SkyProjection, Zenithal):
    r"""
    Slant orthographic projection - pixel to sky.

    Corresponds to the ``SIN`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    The following transformation applies when :math:`\xi` and
    :math:`\eta` are both zero.

    .. math::
        \theta = \cos^{-1}\left(\frac{\pi}{180^{\circ}}R_\theta\right)

    The parameters :math:`\xi` and :math:`\eta` are defined from the
    reference point :math:`(\phi_c, \theta_c)` as:

    .. math::
        \xi &= \cot \theta_c \sin \phi_c \\
        \eta &= - \cot \theta_c \cos \phi_c

    Parameters
    ----------
    xi : float
        Obliqueness parameter, ξ. Default is 0.0.
    eta : float
        Obliqueness parameter, η. Default is 0.0.
    """

    xi = _ParameterDS(default=0.0, description="Obliqueness parameter")
    eta = _ParameterDS(default=0.0, description="Obliqueness parameter")


class Sky2Pix_SlantOrthographic(Sky2PixProjection, Zenithal):
    r"""
    Slant orthographic projection - sky to pixel.

    Corresponds to the ``SIN`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    The following transformation applies when :math:`\xi` and
    :math:`\eta` are both zero.

    .. math::
        R_\theta = \frac{180^{\circ}}{\pi}\cos \theta

    More specifically:

    .. math::
        x &= \frac{180^\circ}{\pi}[\cos \theta \sin \phi + \xi(1 - \sin \theta)] \\
        y &= \frac{180^\circ}{\pi}[\cos \theta \cos \phi + \eta(1 - \sin \theta)]
    """

    xi = _ParameterDS(default=0.0)
    eta = _ParameterDS(default=0.0)


class Pix2Sky_ZenithalEquidistant(Pix2SkyProjection, Zenithal):
    r"""
    Zenithal equidistant projection - pixel to sky.

    Corresponds to the ``ARC`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    .. math::
        \theta = 90^\circ - R_\theta
    """


class Sky2Pix_ZenithalEquidistant(Sky2PixProjection, Zenithal):
    r"""
    Zenithal equidistant projection - sky to pixel.

    Corresponds to the ``ARC`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    .. math::
        R_\theta = 90^\circ - \theta
    """


class Pix2Sky_ZenithalEqualArea(Pix2SkyProjection, Zenithal):
    r"""
    Zenithal equal area projection - pixel to sky.

    Corresponds to the ``ZEA`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    .. math::
        \theta = 90^\circ - 2 \sin^{-1} \left(\frac{\pi R_\theta}{360^\circ}\right)
    """


class Sky2Pix_ZenithalEqualArea(Sky2PixProjection, Zenithal):
    r"""
    Zenithal equal area projection - sky to pixel.

    Corresponds to the ``ZEA`` projection in FITS WCS.
    See `Zenithal` for a definition of the full transformation.

    .. math::
        R_\theta &= \frac{180^\circ}{\pi} \sqrt{2(1 - \sin\theta)} \\
                 &= \frac{360^\circ}{\pi} \sin\left(\frac{90^\circ - \theta}{2}\right)
    """


class Pix2Sky_Airy(Pix2SkyProjection, Zenithal):
    r"""
    Airy projection - pixel to sky.

    Corresponds to the ``AIR`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    Parameters
    ----------
    theta_b : float
        The latitude :math:`\theta_b` at which to minimize the error,
        in degrees. Default is 90°.
    """

    theta_b = _ParameterDS(default=90.0)


class Sky2Pix_Airy(Sky2PixProjection, Zenithal):
    r"""
    Airy projection - sky to pixel.

    Corresponds to the ``AIR`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    .. math::
        R_\theta = -2 \frac{180^\circ}{\pi}\left(\frac{\ln(\cos \xi)}{\tan \xi} +
            \frac{\ln(\cos \xi_b)}{\tan^2 \xi_b} \tan \xi \right)

    where:

    .. math::
        \xi &= \frac{90^\circ - \theta}{2} \\
        \xi_b &= \frac{90^\circ - \theta_b}{2}

    Parameters
    ----------
    theta_b : float
        The latitude :math:`\theta_b` at which to minimize the error,
        in degrees. Default is 90°.
    """

    theta_b = _ParameterDS(default=90.0,
                           description="The latitude at which to minimize the error, in degrees")


class Cylindrical(Projection):
    r"""Base class for Cylindrical projections.

    Cylindrical projections are so-named because the surface of
    projection is a cylinder.
    """
    _separable = True


class Pix2Sky_CylindricalPerspective(Pix2SkyProjection, Cylindrical):
    r"""
    Cylindrical perspective - pixel to sky.

    Corresponds to the ``CYP`` projection in FITS WCS.

    .. math::
        \phi &= \frac{x}{\lambda} \\
        \theta &= \arg(1, \eta) + \sin^{-1}\left(\frac{\eta \mu}{\sqrt{\eta^2 + 1}}\right)

    where:

    .. math::
        \eta = \frac{\pi}{180^{\circ}}\frac{y}{\mu + \lambda}

    Parameters
    ----------
    mu : float
        Distance from center of sphere in the direction opposite the
        projected surface, in spherical radii, μ. Default is 1.
    lam : float
        Radius of the cylinder in spherical radii, λ. Default is 1.
    """

    mu = _ParameterDS(default=1.0)
    lam = _ParameterDS(default=1.0)

    @mu.validator
    def mu(self, value):
        if np.any(value == -self.lam):
            raise InputParameterError(
                "CYP projection is not defined for mu = -lambda")

    @lam.validator
    def lam(self, value):
        if np.any(value == -self.mu):
            raise InputParameterError(
                "CYP projection is not defined for lambda = -mu")


class Sky2Pix_CylindricalPerspective(Sky2PixProjection, Cylindrical):
    r"""
    Cylindrical Perspective - sky to pixel.

    Corresponds to the ``CYP`` projection in FITS WCS.

    .. math::
        x &= \lambda \phi \\
        y &= \frac{180^{\circ}}{\pi}\left(\frac{\mu + \lambda}{\mu + \cos \theta}\right)\sin \theta

    Parameters
    ----------
    mu : float
        Distance from center of sphere in the direction opposite the
        projected surface, in spherical radii, μ. Default is 1.
    lam : float
        Radius of the cylinder in spherical radii, λ. Default is 1.
    """

    mu = _ParameterDS(default=1.0,
                      description="Distance from center of sphere in spherical radii")
    lam = _ParameterDS(default=1.0,
                       description="Radius of the cylinder in spherical radii")

    @mu.validator
    def mu(self, value):
        if np.any(value == -self.lam):
            raise InputParameterError(
                "CYP projection is not defined for mu = -lambda")

    @lam.validator
    def lam(self, value):
        if np.any(value == -self.mu):
            raise InputParameterError(
                "CYP projection is not defined for lambda = -mu")


class Pix2Sky_CylindricalEqualArea(Pix2SkyProjection, Cylindrical):
    r"""
    Cylindrical equal area projection - pixel to sky.

    Corresponds to the ``CEA`` projection in FITS WCS.
math:: \phi &= x \\ \theta &= \sin^{-1}\left(\frac{\pi}{180^{\circ}}\lambda y\right) Parameters ---------- lam : float Radius of the cylinder in spherical radii, λ. Default is 1. """ lam = _ParameterDS(default=1) class Sky2Pix_CylindricalEqualArea(Sky2PixProjection, Cylindrical): r""" Cylindrical equal area projection - sky to pixel. Corresponds to the ``CEA`` projection in FITS WCS. .. math:: x &= \phi \\ y &= \frac{180^{\circ}}{\pi}\frac{\sin \theta}{\lambda} Parameters ---------- lam : float Radius of the cylinder in spherical radii, λ. Default is 1. """ lam = _ParameterDS(default=1) class Pix2Sky_PlateCarree(Pix2SkyProjection, Cylindrical): r""" Plate carrée projection - pixel to sky. Corresponds to the ``CAR`` projection in FITS WCS. .. math:: \phi &= x \\ \theta &= y """ @staticmethod def evaluate(x, y): # The intermediate variables are only used here for clarity phi = np.array(x) theta = np.array(y) return phi, theta class Sky2Pix_PlateCarree(Sky2PixProjection, Cylindrical): r""" Plate carrée projection - sky to pixel. Corresponds to the ``CAR`` projection in FITS WCS. .. math:: x &= \phi \\ y &= \theta """ @staticmethod def evaluate(phi, theta): # The intermediate variables are only used here for clarity x = np.array(phi) y = np.array(theta) return x, y class Pix2Sky_Mercator(Pix2SkyProjection, Cylindrical): r""" Mercator - pixel to sky. Corresponds to the ``MER`` projection in FITS WCS. .. math:: \phi &= x \\ \theta &= 2 \tan^{-1}\left(e^{y \pi / 180^{\circ}}\right)-90^{\circ} """ class Sky2Pix_Mercator(Sky2PixProjection, Cylindrical): r""" Mercator - sky to pixel. Corresponds to the ``MER`` projection in FITS WCS. .. math:: x &= \phi \\ y &= \frac{180^{\circ}}{\pi}\ln \tan \left(\frac{90^{\circ} + \theta}{2}\right) """ class PseudoCylindrical(Projection): r"""Base class for pseudocylindrical projections. Pseudocylindrical projections are like cylindrical projections except the parallels of latitude are projected at diminishing lengths toward the polar regions in order to reduce lateral distortion there. Consequently, the meridians are curved. """ _separable = True class Pix2Sky_SansonFlamsteed(Pix2SkyProjection, PseudoCylindrical): r""" Sanson-Flamsteed projection - pixel to sky. Corresponds to the ``SFL`` projection in FITS WCS. .. math:: \phi &= \frac{x}{\cos y} \\ \theta &= y """ class Sky2Pix_SansonFlamsteed(Sky2PixProjection, PseudoCylindrical): r""" Sanson-Flamsteed projection - sky to pixel. Corresponds to the ``SFL`` projection in FITS WCS. .. math:: x &= \phi \cos \theta \\ y &= \theta """ class Pix2Sky_Parabolic(Pix2SkyProjection, PseudoCylindrical): r""" Parabolic projection - pixel to sky. Corresponds to the ``PAR`` projection in FITS WCS. .. math:: \phi &= \frac{180^\circ}{\pi} \frac{x}{1 - 4(y / 180^\circ)^2} \\ \theta &= 3 \sin^{-1}\left(\frac{y}{180^\circ}\right) """ class Sky2Pix_Parabolic(Sky2PixProjection, PseudoCylindrical): r""" Parabolic projection - sky to pixel. Corresponds to the ``PAR`` projection in FITS WCS. .. math:: x &= \phi \left(2\cos\frac{2\theta}{3} - 1\right) \\ y &= 180^\circ \sin \frac{\theta}{3} """ class Pix2Sky_Molleweide(Pix2SkyProjection, PseudoCylindrical): r""" Molleweide's projection - pixel to sky. Corresponds to the ``MOL`` projection in FITS WCS. ..
math:: \phi &= \frac{\pi x}{2 \sqrt{2 - \left(\frac{\pi}{180^\circ}y\right)^2}} \\ \theta &= \sin^{-1}\left( \frac{1}{90^\circ}\sin^{-1}\left(\frac{\pi}{180^\circ}\frac{y}{\sqrt{2}}\right) + \frac{y}{180^\circ}\sqrt{2 - \left(\frac{\pi}{180^\circ}y\right)^2} \right) """ class Sky2Pix_Molleweide(Sky2PixProjection, PseudoCylindrical): r""" Molleweide's projection - sky to pixel. Corresponds to the ``MOL`` projection in FITS WCS. .. math:: x &= \frac{2 \sqrt{2}}{\pi} \phi \cos \gamma \\ y &= \sqrt{2} \frac{180^\circ}{\pi} \sin \gamma where :math:`\gamma` is defined as the solution of the transcendental equation: .. math:: \sin \theta = \frac{\gamma}{90^\circ} + \frac{\sin 2 \gamma}{\pi} """ class Pix2Sky_HammerAitoff(Pix2SkyProjection, PseudoCylindrical): r""" Hammer-Aitoff projection - pixel to sky. Corresponds to the ``AIT`` projection in FITS WCS. .. math:: \phi &= 2 \arg \left(2Z^2 - 1, \frac{\pi}{180^\circ} \frac{Z}{2}x\right) \\ \theta &= \sin^{-1}\left(\frac{\pi}{180^\circ}yZ\right) """ class Sky2Pix_HammerAitoff(Sky2PixProjection, PseudoCylindrical): r""" Hammer-Aitoff projection - sky to pixel. Corresponds to the ``AIT`` projection in FITS WCS. .. math:: x &= 2 \gamma \cos \theta \sin \frac{\phi}{2} \\ y &= \gamma \sin \theta where: .. math:: \gamma = \frac{180^\circ}{\pi} \sqrt{\frac{2}{1 + \cos \theta \cos(\phi / 2)}} """ class Conic(Projection): r"""Base class for conic projections. In conic projections, the sphere is thought to be projected onto the surface of a cone which is then opened out. In a general sense, the pixel-to-sky transformation is defined as: .. math:: \phi &= \arg\left(\frac{Y_0 - y}{R_\theta}, \frac{x}{R_\theta}\right) / C \\ R_\theta &= \mathrm{sign} \theta_a \sqrt{x^2 + (Y_0 - y)^2} and the inverse (sky-to-pixel) is defined as: .. math:: x &= R_\theta \sin (C \phi) \\ y &= R_\theta \cos (C \phi) + Y_0 where :math:`C` is the "constant of the cone": .. math:: C = \frac{180^\circ \cos \theta}{\pi R_\theta} """ sigma = _ParameterDS(default=90.0, getter=_to_orig_unit, setter=_to_radian) delta = _ParameterDS(default=0.0, getter=_to_orig_unit, setter=_to_radian) class Pix2Sky_ConicPerspective(Pix2SkyProjection, Conic): r""" Colles' conic perspective projection - pixel to sky. Corresponds to the ``COP`` projection in FITS WCS. See `Conic` for a description of the entire equation. The projection formulae are: .. math:: C &= \sin \theta_a \\ R_\theta &= \frac{180^\circ}{\pi} \cos \eta [ \cot \theta_a - \tan(\theta - \theta_a)] \\ Y_0 &= \frac{180^\circ}{\pi} \cos \eta \cot \theta_a Parameters ---------- sigma : float :math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 90. delta : float :math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 0. """ class Sky2Pix_ConicPerspective(Sky2PixProjection, Conic): r""" Colles' conic perspective projection - sky to pixel. Corresponds to the ``COP`` projection in FITS WCS. See `Conic` for a description of the entire equation. The projection formulae are: .. math:: C &= \sin \theta_a \\ R_\theta &= \frac{180^\circ}{\pi} \cos \eta [ \cot \theta_a - \tan(\theta - \theta_a)] \\ Y_0 &= \frac{180^\circ}{\pi} \cos \eta \cot \theta_a Parameters ---------- sigma : float :math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 90. 
delta : float :math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 0. """ class Pix2Sky_ConicEqualArea(Pix2SkyProjection, Conic): r""" Alber's conic equal area projection - pixel to sky. Corresponds to the ``COE`` projection in FITS WCS. See `Conic` for a description of the entire equation. The projection formulae are: .. math:: C &= \gamma / 2 \\ R_\theta &= \frac{180^\circ}{\pi} \frac{2}{\gamma} \sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin \theta} \\ Y_0 &= \frac{180^\circ}{\pi} \frac{2}{\gamma} \sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin((\theta_1 + \theta_2)/2)} where: .. math:: \gamma = \sin \theta_1 + \sin \theta_2 Parameters ---------- sigma : float :math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 90. delta : float :math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 0. """ class Sky2Pix_ConicEqualArea(Sky2PixProjection, Conic): r""" Alber's conic equal area projection - sky to pixel. Corresponds to the ``COE`` projection in FITS WCS. See `Conic` for a description of the entire equation. The projection formulae are: .. math:: C &= \gamma / 2 \\ R_\theta &= \frac{180^\circ}{\pi} \frac{2}{\gamma} \sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin \theta} \\ Y_0 &= \frac{180^\circ}{\pi} \frac{2}{\gamma} \sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin((\theta_1 + \theta_2)/2)} where: .. math:: \gamma = \sin \theta_1 + \sin \theta_2 Parameters ---------- sigma : float :math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 90. delta : float :math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 0. """ class Pix2Sky_ConicEquidistant(Pix2SkyProjection, Conic): r""" Conic equidistant projection - pixel to sky. Corresponds to the ``COD`` projection in FITS WCS. See `Conic` for a description of the entire equation. The projection formulae are: .. math:: C &= \frac{180^\circ}{\pi} \frac{\sin\theta_a\sin\eta}{\eta} \\ R_\theta &= \theta_a - \theta + \eta\cot\eta\cot\theta_a \\ Y_0 &= \eta\cot\eta\cot\theta_a Parameters ---------- sigma : float :math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 90. delta : float :math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 0. """ class Sky2Pix_ConicEquidistant(Sky2PixProjection, Conic): r""" Conic equidistant projection - sky to pixel. Corresponds to the ``COD`` projection in FITS WCS. See `Conic` for a description of the entire equation. The projection formulae are: .. math:: C &= \frac{180^\circ}{\pi} \frac{\sin\theta_a\sin\eta}{\eta} \\ R_\theta &= \theta_a - \theta + \eta\cot\eta\cot\theta_a \\ Y_0 &= \eta\cot\eta\cot\theta_a Parameters ---------- sigma : float :math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 90. delta : float :math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 0.
""" class Pix2Sky_ConicOrthomorphic(Pix2SkyProjection, Conic): r""" Conic orthomorphic projection - pixel to sky. Corresponds to the ``COO`` projection in FITS WCS. See `Conic` for a description of the entire equation. The projection formulae are: .. math:: C &= \frac{\ln \left( \frac{\cos\theta_2}{\cos\theta_1} \right)} {\ln \left[ \frac{\tan\left(\frac{90^\circ-\theta_2}{2}\right)} {\tan\left(\frac{90^\circ-\theta_1}{2}\right)} \right] } \\ R_\theta &= \psi \left[ \tan \left( \frac{90^\circ - \theta}{2} \right) \right]^C \\ Y_0 &= \psi \left[ \tan \left( \frac{90^\circ - \theta_a}{2} \right) \right]^C where: .. math:: \psi = \frac{180^\circ}{\pi} \frac{\cos \theta} {C\left[\tan\left(\frac{90^\circ-\theta}{2}\right)\right]^C} Parameters ---------- sigma : float :math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 90. delta : float :math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 0. """ class Sky2Pix_ConicOrthomorphic(Sky2PixProjection, Conic): r""" Conic orthomorphic projection - sky to pixel. Corresponds to the ``COO`` projection in FITS WCS. See `Conic` for a description of the entire equation. The projection formulae are: .. math:: C &= \frac{\ln \left( \frac{\cos\theta_2}{\cos\theta_1} \right)} {\ln \left[ \frac{\tan\left(\frac{90^\circ-\theta_2}{2}\right)} {\tan\left(\frac{90^\circ-\theta_1}{2}\right)} \right] } \\ R_\theta &= \psi \left[ \tan \left( \frac{90^\circ - \theta}{2} \right) \right]^C \\ Y_0 &= \psi \left[ \tan \left( \frac{90^\circ - \theta_a}{2} \right) \right]^C where: .. math:: \psi = \frac{180^\circ}{\pi} \frac{\cos \theta} {C\left[\tan\left(\frac{90^\circ-\theta}{2}\right)\right]^C} Parameters ---------- sigma : float :math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 90. delta : float :math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 0. """ class PseudoConic(Projection): r"""Base class for pseudoconic projections. Pseudoconics are a subclass of conics with concentric parallels. """ class Pix2Sky_BonneEqualArea(Pix2SkyProjection, PseudoConic): r""" Bonne's equal area pseudoconic projection - pixel to sky. Corresponds to the ``BON`` projection in FITS WCS. .. math:: \phi &= \frac{\pi}{180^\circ} A_\phi R_\theta / \cos \theta \\ \theta &= Y_0 - R_\theta where: .. math:: R_\theta &= \mathrm{sign} \theta_1 \sqrt{x^2 + (Y_0 - y)^2} \\ A_\phi &= \arg\left(\frac{Y_0 - y}{R_\theta}, \frac{x}{R_\theta}\right) Parameters ---------- theta1 : float Bonne conformal latitude, in degrees. """ _separable = True theta1 = _ParameterDS(default=0.0, getter=_to_orig_unit, setter=_to_radian) class Sky2Pix_BonneEqualArea(Sky2PixProjection, PseudoConic): r""" Bonne's equal area pseudoconic projection - sky to pixel. Corresponds to the ``BON`` projection in FITS WCS. .. math:: x &= R_\theta \sin A_\phi \\ y &= -R_\theta \cos A_\phi + Y_0 where: .. math:: A_\phi &= \frac{180^\circ}{\pi R_\theta} \phi \cos \theta \\ R_\theta &= Y_0 - \theta \\ Y_0 &= \frac{180^\circ}{\pi} \cot \theta_1 + \theta_1 Parameters ---------- theta1 : float Bonne conformal latitude, in degrees. 
""" _separable = True theta1 = _ParameterDS(default=0.0, getter=_to_orig_unit, setter=_to_radian, description="Bonne conformal latitude, in degrees") class Pix2Sky_Polyconic(Pix2SkyProjection, PseudoConic): r""" Polyconic projection - pixel to sky. Corresponds to the ``PCO`` projection in FITS WCS. """ class Sky2Pix_Polyconic(Sky2PixProjection, PseudoConic): r""" Polyconic projection - sky to pixel. Corresponds to the ``PCO`` projection in FITS WCS. """ class QuadCube(Projection): r"""Base class for quad cube projections. Quadrilateralized spherical cube (quad-cube) projections belong to the class of polyhedral projections in which the sphere is projected onto the surface of an enclosing polyhedron. The six faces of the quad-cube projections are numbered and laid out as:: 0 4 3 2 1 4 3 2 5 """ class Pix2Sky_TangentialSphericalCube(Pix2SkyProjection, QuadCube): r""" Tangential spherical cube projection - pixel to sky. Corresponds to the ``TSC`` projection in FITS WCS. """ class Sky2Pix_TangentialSphericalCube(Sky2PixProjection, QuadCube): r""" Tangential spherical cube projection - sky to pixel. Corresponds to the ``TSC`` projection in FITS WCS. """ class Pix2Sky_COBEQuadSphericalCube(Pix2SkyProjection, QuadCube): r""" COBE quadrilateralized spherical cube projection - pixel to sky. Corresponds to the ``CSC`` projection in FITS WCS. """ class Sky2Pix_COBEQuadSphericalCube(Sky2PixProjection, QuadCube): r""" COBE quadrilateralized spherical cube projection - sky to pixel. Corresponds to the ``CSC`` projection in FITS WCS. """ class Pix2Sky_QuadSphericalCube(Pix2SkyProjection, QuadCube): r""" Quadrilateralized spherical cube projection - pixel to sky. Corresponds to the ``QSC`` projection in FITS WCS. """ class Sky2Pix_QuadSphericalCube(Sky2PixProjection, QuadCube): r""" Quadrilateralized spherical cube projection - sky to pixel. Corresponds to the ``QSC`` projection in FITS WCS. """ class HEALPix(Projection): r"""Base class for HEALPix projections. """ class Pix2Sky_HEALPix(Pix2SkyProjection, HEALPix): r""" HEALPix - pixel to sky. Corresponds to the ``HPX`` projection in FITS WCS. Parameters ---------- H : float The number of facets in longitude direction. X : float The number of facets in latitude direction. """ _separable = True H = _ParameterDS(default=4.0, description="The number of facets in longitude direction.") X = _ParameterDS(default=3.0, description="The number of facets in latitude direction.") class Sky2Pix_HEALPix(Sky2PixProjection, HEALPix): r""" HEALPix projection - sky to pixel. Corresponds to the ``HPX`` projection in FITS WCS. Parameters ---------- H : float The number of facets in longitude direction. X : float The number of facets in latitude direction. """ _separable = True H = _ParameterDS(default=4.0, description="The number of facets in longitude direction.") X = _ParameterDS(default=3.0, description="The number of facets in latitude direction.") class Pix2Sky_HEALPixPolar(Pix2SkyProjection, HEALPix): r""" HEALPix polar, aka "butterfly" projection - pixel to sky. Corresponds to the ``XPH`` projection in FITS WCS. """ class Sky2Pix_HEALPixPolar(Sky2PixProjection, HEALPix): r""" HEALPix polar, aka "butterfly" projection - pixel to sky. Corresponds to the ``XPH`` projection in FITS WCS. """ class AffineTransformation2D(Model): """ Perform an affine transformation in 2 dimensions. 
Parameters ---------- matrix : array A 2x2 matrix specifying the linear transformation to apply to the inputs translation : array A 2D vector (given as either a 2x1 or 1x2 array) specifying a translation to apply to the inputs """ n_inputs = 2 n_outputs = 2 standard_broadcasting = False _separable = False matrix = Parameter(default=[[1.0, 0.0], [0.0, 1.0]]) translation = Parameter(default=[0.0, 0.0]) @matrix.validator def matrix(self, value): """Validates that the input matrix is a 2x2 2D array.""" if np.shape(value) != (2, 2): raise InputParameterError( "Expected transformation matrix to be a 2x2 array") @translation.validator def translation(self, value): """ Validates that the translation vector is a 2D vector. This allows either a "row" vector or a "column" vector where in the latter case the resultant Numpy array has ``ndim=2`` but the shape is ``(1, 2)``. """ if not ((np.ndim(value) == 1 and np.shape(value) == (2,)) or (np.ndim(value) == 2 and np.shape(value) == (1, 2))): raise InputParameterError( "Expected translation vector to be a 2 element row or column " "vector array") def __init__(self, matrix=matrix, translation=translation, **kwargs): super().__init__(matrix=matrix, translation=translation, **kwargs) self.inputs = ("x", "y") self.outputs = ("x", "y") @property def inverse(self): """ Inverse transformation. Raises `~astropy.modeling.InputParameterError` if the transformation cannot be inverted. """ det = np.linalg.det(self.matrix.value) if det == 0: raise InputParameterError( f"Transformation matrix is singular; {self.__class__.__name__} model does not " "have an inverse") matrix = np.linalg.inv(self.matrix.value) if self.matrix.unit is not None: matrix = matrix * self.matrix.unit # If matrix has unit then translation has unit, so no need to assign it. translation = -np.dot(matrix, self.translation.value) return self.__class__(matrix=matrix, translation=translation) @classmethod def evaluate(cls, x, y, matrix, translation): """ Apply the transformation to a set of 2D Cartesian coordinates given as two lists--one for the x coordinates and one for the y coordinates--or a single coordinate pair. Parameters ---------- x, y : array, float x and y coordinates """ if x.shape != y.shape: raise ValueError("Expected input arrays to have the same shape") shape = x.shape or (1,) # Use asarray to strip any units from the inputs.
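# Stack the coordinates in homogeneous form [x, y, 1] so that a single
# 3x3 matrix multiplication applies both the linear map and the translation.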
inarr = np.vstack([np.asarray(x).ravel(), np.asarray(y).ravel(), np.ones(x.size, x.dtype)]) if inarr.shape[0] != 3 or inarr.ndim != 2: raise ValueError("Incompatible input shapes") augmented_matrix = cls._create_augmented_matrix(matrix, translation) result = np.dot(augmented_matrix, inarr) x, y = result[0], result[1] x.shape = y.shape = shape return x, y @staticmethod def _create_augmented_matrix(matrix, translation): unit = None if any([hasattr(translation, 'unit'), hasattr(matrix, 'unit')]): if not all([hasattr(translation, 'unit'), hasattr(matrix, 'unit')]): raise ValueError("To use AffineTransformation with quantities, " "both matrix and translation need to be quantities.") unit = translation.unit # matrix should have the same units as translation if not (matrix.unit / translation.unit) == u.dimensionless_unscaled: raise ValueError("matrix and translation must have the same units.") augmented_matrix = np.empty((3, 3), dtype=float) augmented_matrix[0:2, 0:2] = matrix augmented_matrix[0:2, 2:].flat = translation augmented_matrix[2] = [0, 0, 1] if unit is not None: return augmented_matrix * unit return augmented_matrix @property def input_units(self): if self.translation.unit is None and self.matrix.unit is None: return None elif self.translation.unit is not None: return dict(zip(self.inputs, [self.translation.unit] * 2)) else: return dict(zip(self.inputs, [self.matrix.unit] * 2)) for long_name, short_name in _PROJ_NAME_CODE: # define short-name projection equivalent classes: globals()['Pix2Sky_' + short_name] = globals()['Pix2Sky_' + long_name] globals()['Sky2Pix_' + short_name] = globals()['Sky2Pix_' + long_name] # set inverse classes: globals()['Pix2Sky_' + long_name]._inv_cls = globals()['Sky2Pix_' + long_name] globals()['Sky2Pix_' + long_name]._inv_cls = globals()['Pix2Sky_' + long_name]
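# ---------------------------------------------------------------------------
# Illustrative usage sketch (comments only; not executed on import). The
# short-name aliases defined above are the same classes as their long-name
# counterparts, and ``.inverse`` yields the projection in the other direction:
#
#     >>> tan = Pix2Sky_TAN()                 # alias of Pix2Sky_Gnomonic
#     >>> phi, theta = tan(3.0, 4.0)          # pixel -> native spherical
#     >>> x, y = tan.inverse(phi, theta)      # round-trips to ~(3.0, 4.0)
# ---------------------------------------------------------------------------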
aee55ba2c07686583d13dbc5acee763315ab7ca8c84b3288c17a80765bb3d7fb
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains models representing polynomials and polynomial series. """ # pylint: disable=invalid-name import numpy as np from astropy.utils import check_broadcast, indent from .core import FittableModel, Model from .functional_models import Shift from .parameters import Parameter from .utils import _validate_domain_window, comb, poly_map_domain __all__ = [ 'Chebyshev1D', 'Chebyshev2D', 'Hermite1D', 'Hermite2D', 'InverseSIP', 'Legendre1D', 'Legendre2D', 'Polynomial1D', 'Polynomial2D', 'SIP', 'OrthoPolynomialBase', 'PolynomialModel' ] class PolynomialBase(FittableModel): """ Base class for all polynomial-like models with an arbitrary number of parameters in the form of coefficients. In this case Parameter instances are returned through the class's ``__getattr__`` rather than through class descriptors. """ # Default _param_names list; this will be filled in by the implementation's # __init__ _param_names = () linear = True col_fit_deriv = False @property def param_names(self): """Coefficient names generated based on the model's polynomial degree and number of dimensions. Subclasses should implement this to return parameter names in the desired format. On most `Model` classes this is a class attribute, but for polynomial models it is an instance attribute since each polynomial model instance can have different parameters depending on the degree of the polynomial and the number of dimensions, for example. """ return self._param_names class PolynomialModel(PolynomialBase): """ Base class for polynomial models. Its main purpose is to determine how many coefficients are needed based on the polynomial order and dimension and to provide their default values, names and ordering. """ def __init__(self, degree, n_models=None, model_set_axis=None, name=None, meta=None, **params): self._degree = degree self._order = self.get_num_coeff(self.n_inputs) self._param_names = self._generate_coeff_names(self.n_inputs) if n_models: if model_set_axis is None: model_set_axis = 0 minshape = (1,) * model_set_axis + (n_models,) else: minshape = () for param_name in self._param_names: self._parameters_[param_name] = Parameter(param_name, default=np.zeros(minshape)) super().__init__( n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params) @property def degree(self): """Degree of polynomial.""" return self._degree def get_num_coeff(self, ndim): """ Return the number of coefficients in one parameter set """ if self.degree < 0: raise ValueError("Degree of polynomial must be positive or null") # deg+1 is used to account for the difference between iraf using # degree and numpy using exact degree if ndim != 1: nmixed = comb(self.degree, ndim) else: nmixed = 0 numc = self.degree * ndim + nmixed + 1 return numc def _invlex(self): c = [] lencoeff = self.degree + 1 for i in range(lencoeff): for j in range(lencoeff): if i + j <= self.degree: c.append((j, i)) return c[::-1] def _generate_coeff_names(self, ndim): names = [] if ndim == 1: for n in range(self._order): names.append(f'c{n}') else: for i in range(self.degree + 1): names.append(f'c{i}_{0}') for i in range(1, self.degree + 1): names.append(f'c{0}_{i}') for i in range(1, self.degree): for j in range(1, self.degree): if i + j < self.degree + 1: names.append(f'c{i}_{j}') return tuple(names) class _PolyDomainWindow1D(PolynomialModel): """ This class sets ``domain`` and ``window`` of 1D polynomials. 
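When ``domain`` is set, evaluation first maps the input linearly from ``domain`` onto ``window`` (via ``poly_map_domain``) before the series is summed.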
""" def __init__(self, degree, domain=None, window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params): super().__init__( degree, n_models, model_set_axis, name=name, meta=meta, **params) self._set_default_domain_window(domain, window) @property def window(self): return self._window @window.setter def window(self, val): self._window = _validate_domain_window(val) @property def domain(self): return self._domain @domain.setter def domain(self, val): self._domain = _validate_domain_window(val) def _set_default_domain_window(self, domain, window): """ This method sets the ``domain`` and ``window`` attributes on 1D subclasses. """ self._default_domain_window = {'domain': None, 'window': (-1, 1) } self.window = window or (-1, 1) self.domain = domain def __repr__(self): return self._format_repr([self.degree], kwargs={'domain': self.domain, 'window': self.window}, defaults=self._default_domain_window ) def __str__(self): return self._format_str([('Degree', self.degree), ('Domain', self.domain), ('Window', self.window)], self._default_domain_window) class OrthoPolynomialBase(PolynomialBase): """ This is a base class for the 2D Chebyshev and Legendre models. The polynomials implemented here require a maximum degree in x and y. For explanation of ``x_domain``, ``y_domain``, ```x_window`` and ```y_window`` see :ref:`Notes regarding usage of domain and window <astropy:domain-window-note>`. Parameters ---------- x_degree : int degree in x y_degree : int degree in y x_domain : tuple or None, optional domain of the x independent variable x_window : tuple or None, optional range of the x independent variable y_domain : tuple or None, optional domain of the y independent variable y_window : tuple or None, optional range of the y independent variable **params : dict {keyword: value} pairs, representing {parameter_name: value} """ n_inputs = 2 n_outputs = 1 def __init__(self, x_degree, y_degree, x_domain=None, x_window=None, y_domain=None, y_window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params): self.x_degree = x_degree self.y_degree = y_degree self._order = self.get_num_coeff() # Set the ``x/y_domain`` and ``x/y_wndow`` attributes in subclasses. 
self._default_domain_window = { 'x_window': (-1, 1), 'y_window': (-1, 1), 'x_domain': None, 'y_domain': None } self.x_window = x_window or self._default_domain_window['x_window'] self.y_window = y_window or self._default_domain_window['y_window'] self.x_domain = x_domain self.y_domain = y_domain self._param_names = self._generate_coeff_names() if n_models: if model_set_axis is None: model_set_axis = 0 minshape = (1,) * model_set_axis + (n_models,) else: minshape = () for param_name in self._param_names: self._parameters_[param_name] = Parameter(param_name, default=np.zeros(minshape)) super().__init__( n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params) @property def x_domain(self): return self._x_domain @x_domain.setter def x_domain(self, val): self._x_domain = _validate_domain_window(val) @property def y_domain(self): return self._y_domain @y_domain.setter def y_domain(self, val): self._y_domain = _validate_domain_window(val) @property def x_window(self): return self._x_window @x_window.setter def x_window(self, val): self._x_window = _validate_domain_window(val) @property def y_window(self): return self._y_window @y_window.setter def y_window(self, val): self._y_window = _validate_domain_window(val) def __repr__(self): return self._format_repr([self.x_degree, self.y_degree], kwargs={'x_domain': self.x_domain, 'y_domain': self.y_domain, 'x_window': self.x_window, 'y_window': self.y_window}, defaults=self._default_domain_window) def __str__(self): return self._format_str( [('X_Degree', self.x_degree), ('Y_Degree', self.y_degree), ('X_Domain', self.x_domain), ('Y_Domain', self.y_domain), ('X_Window', self.x_window), ('Y_Window', self.y_window)], self._default_domain_window) def get_num_coeff(self): """ Determine how many coefficients are needed Returns ------- numc : int number of coefficients """ if self.x_degree < 0 or self.y_degree < 0: raise ValueError("Degree of polynomial must be positive or null") return (self.x_degree + 1) * (self.y_degree + 1) def _invlex(self): # TODO: This is a very slow way to do this; fix it and related methods # like _alpha c = [] xvar = np.arange(self.x_degree + 1) yvar = np.arange(self.y_degree + 1) for j in yvar: for i in xvar: c.append((i, j)) return np.array(c[::-1]) def invlex_coeff(self, coeffs): invlex_coeffs = [] xvar = np.arange(self.x_degree + 1) yvar = np.arange(self.y_degree + 1) for j in yvar: for i in xvar: name = f'c{i}_{j}' coeff = coeffs[self.param_names.index(name)] invlex_coeffs.append(coeff) return np.array(invlex_coeffs[::-1]) def _alpha(self): invlexdeg = self._invlex() invlexdeg[:, 1] = invlexdeg[:, 1] + self.x_degree + 1 nx = self.x_degree + 1 ny = self.y_degree + 1 alpha = np.zeros((ny * nx + 3, ny + nx)) for n in range(len(invlexdeg)): alpha[n][invlexdeg[n]] = [1, 1] alpha[-2, 0] = 1 alpha[-3, nx] = 1 return alpha def imhorner(self, x, y, coeff): _coeff = list(coeff) _coeff.extend([0, 0, 0]) alpha = self._alpha() r0 = _coeff[0] nalpha = len(alpha) karr = np.diff(alpha, axis=0) kfunc = self._fcache(x, y) x_terms = self.x_degree + 1 y_terms = self.y_degree + 1 nterms = x_terms + y_terms for n in range(1, nterms + 1 + 3): setattr(self, 'r' + str(n), 0.) for n in range(1, nalpha): k = karr[n - 1].nonzero()[0].max() + 1 rsum = 0 for i in range(1, k + 1): rsum = rsum + getattr(self, 'r' + str(i)) val = kfunc[k - 1] * (r0 + rsum) setattr(self, 'r' + str(k), val) r0 = _coeff[n] for i in range(1, k): setattr(self, 'r' + str(i), 0.) 
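# Finish the Horner-like accumulation: the series value is r0 (the last
# coefficient consumed) plus the contents of every register r1..r(nterms + 3).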
result = r0 for i in range(1, nterms + 1 + 3): result = result + getattr(self, 'r' + str(i)) return result def _generate_coeff_names(self): names = [] for j in range(self.y_degree + 1): for i in range(self.x_degree + 1): names.append(f'c{i}_{j}') return tuple(names) def _fcache(self, x, y): """ Compute and store the individual functions. To be implemented by subclasses. """ raise NotImplementedError("Subclasses should implement this") def evaluate(self, x, y, *coeffs): if self.x_domain is not None: x = poly_map_domain(x, self.x_domain, self.x_window) if self.y_domain is not None: y = poly_map_domain(y, self.y_domain, self.y_window) invcoeff = self.invlex_coeff(coeffs) return self.imhorner(x, y, invcoeff) def prepare_inputs(self, x, y, **kwargs): inputs, broadcasted_shapes = super().prepare_inputs(x, y, **kwargs) x, y = inputs if x.shape != y.shape: raise ValueError("Expected input arrays to have the same shape") return (x, y), broadcasted_shapes class Chebyshev1D(_PolyDomainWindow1D): r""" Univariate Chebyshev series. It is defined as: .. math:: P(x) = \sum_{i=0}^{i=n}C_{i} * T_{i}(x) where ``T_i(x)`` is the corresponding Chebyshev polynomial of the 1st kind. For explanation of ``domain`` and ``window`` see :ref:`Notes regarding usage of domain and window <domain-window-note>`. Parameters ---------- degree : int degree of the series domain : tuple or None, optional window : tuple or None, optional If None, it is set to (-1, 1) Fitters will remap the domain to this window. **params : dict keyword : value pairs, representing parameter_name: value Notes ----- This model does not support the use of units/quantities, because each term in the sum of Chebyshev polynomials is a polynomial in x - since the coefficients within each Chebyshev polynomial are fixed, we can't use quantities for x since the units would not be compatible. For example, the third Chebyshev polynomial (T2) is 2x^2-1, but if x was specified with units, 2x^2 and -1 would have incompatible units. """ n_inputs = 1 n_outputs = 1 _separable = True def __init__(self, degree, domain=None, window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params): super().__init__(degree, domain=domain, window=window, n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params) def fit_deriv(self, x, *params): """ Computes the Vandermonde matrix. Parameters ---------- x : ndarray input *params throw-away parameter list returned by non-linear fitters Returns ------- result : ndarray The Vandermonde matrix """ x = np.array(x, dtype=float, copy=False, ndmin=1) v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype) v[0] = 1 if self.degree > 0: x2 = 2 * x v[1] = x for i in range(2, self.degree + 1): v[i] = v[i - 1] * x2 - v[i - 2] return np.rollaxis(v, 0, v.ndim) def prepare_inputs(self, x, **kwargs): inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs) x = inputs[0] return (x,), broadcasted_shapes def evaluate(self, x, *coeffs): if self.domain is not None: x = poly_map_domain(x, self.domain, self.window) return self.clenshaw(x, coeffs) @staticmethod def clenshaw(x, coeffs): """Evaluates the polynomial using Clenshaw's algorithm.""" if len(coeffs) == 1: c0 = coeffs[0] c1 = 0 elif len(coeffs) == 2: c0 = coeffs[0] c1 = coeffs[1] else: x2 = 2 * x c0 = coeffs[-2] c1 = coeffs[-1] for i in range(3, len(coeffs) + 1): tmp = c0 c0 = coeffs[-i] - c1 c1 = tmp + c1 * x2 return c0 + c1 * x class Hermite1D(_PolyDomainWindow1D): r""" Univariate Hermite series. It is defined as: ..
math:: P(x) = \sum_{i=0}^{i=n}C_{i} * H_{i}(x) where ``H_i(x)`` is the corresponding Hermite polynomial ("Physicist's kind"). For explanation of ``domain``, and ``window`` see :ref:`Notes regarding usage of domain and window <domain-window-note>`. Parameters ---------- degree : int degree of the series domain : tuple or None, optional window : tuple or None, optional If None, it is set to (-1, 1) Fitters will remap the domain to this window **params : dict keyword : value pairs, representing parameter_name: value Notes ----- This model does not support the use of units/quantities, because each term in the sum of Hermite polynomials is a polynomial in x - since the coefficients within each Hermite polynomial are fixed, we can't use quantities for x since the units would not be compatible. For example, the third Hermite polynomial (H2) is 4x^2-2, but if x was specified with units, 4x^2 and -2 would have incompatible units. """ n_inputs = 1 n_outputs = 1 _separable = True def __init__(self, degree, domain=None, window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params): super().__init__( degree, domain, window, n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params) def fit_deriv(self, x, *params): """ Computes the Vandermonde matrix. Parameters ---------- x : ndarray input *params throw-away parameter list returned by non-linear fitters Returns ------- result : ndarray The Vandermonde matrix """ x = np.array(x, dtype=float, copy=False, ndmin=1) v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype) v[0] = 1 if self.degree > 0: x2 = 2 * x v[1] = 2 * x for i in range(2, self.degree + 1): v[i] = x2 * v[i - 1] - 2 * (i - 1) * v[i - 2] return np.rollaxis(v, 0, v.ndim) def prepare_inputs(self, x, **kwargs): inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs) x = inputs[0] return (x,), broadcasted_shapes def evaluate(self, x, *coeffs): if self.domain is not None: x = poly_map_domain(x, self.domain, self.window) return self.clenshaw(x, coeffs) @staticmethod def clenshaw(x, coeffs): x2 = x * 2 if len(coeffs) == 1: c0 = coeffs[0] c1 = 0 elif len(coeffs) == 2: c0 = coeffs[0] c1 = coeffs[1] else: nd = len(coeffs) c0 = coeffs[-2] c1 = coeffs[-1] for i in range(3, len(coeffs) + 1): temp = c0 nd = nd - 1 c0 = coeffs[-i] - c1 * (2 * (nd - 1)) c1 = temp + c1 * x2 return c0 + c1 * x2 class Hermite2D(OrthoPolynomialBase): r""" Bivariate Hermite series. It is defined as .. math:: P_{nm}(x,y) = \sum_{n,m=0}^{n=d,m=d}C_{nm} H_n(x) H_m(y) where ``H_n(x)`` and ``H_m(y)`` are Hermite polynomials. For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window`` see :ref:`Notes regarding usage of domain and window <domain-window-note>`. 
Parameters ---------- x_degree : int degree in x y_degree : int degree in y x_domain : tuple or None, optional domain of the x independent variable y_domain : tuple or None, optional domain of the y independent variable x_window : tuple or None, optional range of the x independent variable If None, it is set to (-1, 1) Fitters will remap the domain to this window y_window : tuple or None, optional range of the y independent variable If None, it is set to (-1, 1) Fitters will remap the domain to this window **params : dict keyword: value pairs, representing parameter_name: value Notes ----- This model does not support the use of units/quantities, because each term in the sum of Hermite polynomials is a polynomial in x and/or y - since the coefficients within each Hermite polynomial are fixed, we can't use quantities for x and/or y since the units would not be compatible. For example, the third Hermite polynomial (H2) is 4x^2-2, but if x was specified with units, 4x^2 and -2 would have incompatible units. """ _separable = False def __init__(self, x_degree, y_degree, x_domain=None, x_window=None, y_domain=None, y_window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params): super().__init__( x_degree, y_degree, x_domain=x_domain, y_domain=y_domain, x_window=x_window, y_window=y_window, n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params) def _fcache(self, x, y): """ Calculate the individual Hermite functions once and store them in a dictionary to be reused. """ x_terms = self.x_degree + 1 y_terms = self.y_degree + 1 kfunc = {} kfunc[0] = np.ones(x.shape) kfunc[1] = 2 * x.copy() kfunc[x_terms] = np.ones(y.shape) kfunc[x_terms + 1] = 2 * y.copy() for n in range(2, x_terms): kfunc[n] = 2 * x * kfunc[n - 1] - 2 * (n - 1) * kfunc[n - 2] for n in range(x_terms + 2, x_terms + y_terms): kfunc[n] = 2 * y * kfunc[n - 1] - 2 * (n - 1) * kfunc[n - 2] return kfunc def fit_deriv(self, x, y, *params): """ Derivatives with respect to the coefficients. This is an array with Hermite polynomials: .. math:: H_{x_0}H_{y_0}, H_{x_1}H_{y_0}...H_{x_n}H_{y_0}...H_{x_n}H_{y_m} Parameters ---------- x : ndarray input y : ndarray input *params throw-away parameter list returned by non-linear fitters Returns ------- result : ndarray The Vandermonde matrix """ if x.shape != y.shape: raise ValueError("x and y must have the same shape") x = x.flatten() y = y.flatten() x_deriv = self._hermderiv1d(x, self.x_degree + 1).T y_deriv = self._hermderiv1d(y, self.y_degree + 1).T ij = [] for i in range(self.y_degree + 1): for j in range(self.x_degree + 1): ij.append(x_deriv[j] * y_deriv[i]) v = np.array(ij) return v.T def _hermderiv1d(self, x, deg): """ Derivative of 1D Hermite series """ x = np.array(x, dtype=float, copy=False, ndmin=1) d = np.empty((deg + 1, len(x)), dtype=x.dtype) d[0] = x * 0 + 1 if deg > 0: x2 = 2 * x d[1] = x2 for i in range(2, deg + 1): d[i] = x2 * d[i - 1] - 2 * (i - 1) * d[i - 2] return np.rollaxis(d, 0, d.ndim) class Legendre1D(_PolyDomainWindow1D): r""" Univariate Legendre series. It is defined as: .. math:: P(x) = \sum_{i=0}^{i=n}C_{i} * L_{i}(x) where ``L_i(x)`` is the corresponding Legendre polynomial. For explanation of ``domain``, and ``window`` see :ref:`Notes regarding usage of domain and window <domain-window-note>`. 
Parameters ---------- degree : int degree of the series domain : tuple or None, optional window : tuple or None, optional If None, it is set to (-1, 1) Fitters will remap the domain to this window **params : dict keyword: value pairs, representing parameter_name: value Notes ----- This model does not support the use of units/quantities, because each term in the sum of Legendre polynomials is a polynomial in x - since the coefficients within each Legendre polynomial are fixed, we can't use quantities for x since the units would not be compatible. For example, the third Legendre polynomial (P2) is 1.5x^2-0.5, but if x was specified with units, 1.5x^2 and -0.5 would have incompatible units. """ n_inputs = 1 n_outputs = 1 _separable = True def __init__(self, degree, domain=None, window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params): super().__init__( degree, domain, window, n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params) def prepare_inputs(self, x, **kwargs): inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs) x = inputs[0] return (x,), broadcasted_shapes def evaluate(self, x, *coeffs): if self.domain is not None: x = poly_map_domain(x, self.domain, self.window) return self.clenshaw(x, coeffs) def fit_deriv(self, x, *params): """ Computes the Vandermonde matrix. Parameters ---------- x : ndarray input *params throw-away parameter list returned by non-linear fitters Returns ------- result : ndarray The Vandermonde matrix """ x = np.array(x, dtype=float, copy=False, ndmin=1) v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype) v[0] = 1 if self.degree > 0: v[1] = x for i in range(2, self.degree + 1): v[i] = (v[i - 1] * x * (2 * i - 1) - v[i - 2] * (i - 1)) / i return np.rollaxis(v, 0, v.ndim) @staticmethod def clenshaw(x, coeffs): if len(coeffs) == 1: c0 = coeffs[0] c1 = 0 elif len(coeffs) == 2: c0 = coeffs[0] c1 = coeffs[1] else: nd = len(coeffs) c0 = coeffs[-2] c1 = coeffs[-1] for i in range(3, len(coeffs) + 1): tmp = c0 nd = nd - 1 c0 = coeffs[-i] - (c1 * (nd - 1)) / nd c1 = tmp + (c1 * x * (2 * nd - 1)) / nd return c0 + c1 * x class Polynomial1D(_PolyDomainWindow1D): r""" 1D Polynomial model. It is defined as: .. math:: P = \sum_{i=0}^{i=n}C_{i} * x^{i} For explanation of ``domain``, and ``window`` see :ref:`Notes regarding usage of domain and window <domain-window-note>`. Parameters ---------- degree : int degree of the series domain : tuple or None, optional If None, it is set to (-1, 1) window : tuple or None, optional If None, it is set to (-1, 1) Fitters will remap the domain to this window **params : dict keyword: value pairs, representing parameter_name: value """ n_inputs = 1 n_outputs = 1 _separable = True def __init__(self, degree, domain=None, window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params): super().__init__( degree, domain, window, n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params) # Set domain separately because it's different from # the orthogonal polynomials. 
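# With the defaults domain == window == (-1, 1) the mapping in evaluate() is
# the identity, so a plain power series is evaluated unless a custom domain is set.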
self._default_domain_window = {'domain': (-1, 1), 'window': (-1, 1), } self.domain = domain or self._default_domain_window['domain'] self.window = window or self._default_domain_window['window'] def prepare_inputs(self, x, **kwargs): inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs) x = inputs[0] return (x,), broadcasted_shapes def evaluate(self, x, *coeffs): if self.domain is not None: x = poly_map_domain(x, self.domain, self.window) return self.horner(x, coeffs) def fit_deriv(self, x, *params): """ Computes the Vandermonde matrix. Parameters ---------- x : ndarray input *params throw-away parameter list returned by non-linear fitters Returns ------- result : ndarray The Vandermonde matrix """ v = np.empty((self.degree + 1,) + x.shape, dtype=float) v[0] = 1 if self.degree > 0: v[1] = x for i in range(2, self.degree + 1): v[i] = v[i - 1] * x return np.rollaxis(v, 0, v.ndim) @staticmethod def horner(x, coeffs): if len(coeffs) == 1: c0 = coeffs[-1] * np.ones_like(x, subok=False) else: c0 = coeffs[-1] for i in range(2, len(coeffs) + 1): c0 = coeffs[-i] + c0 * x return c0 @property def input_units(self): if self.degree == 0 or self.c1.unit is None: return None else: return {self.inputs[0]: self.c0.unit / self.c1.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): mapping = {} for i in range(self.degree + 1): par = getattr(self, f'c{i}') mapping[par.name] = outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]] ** i return mapping class Polynomial2D(PolynomialModel): r""" 2D Polynomial model. Represents a general polynomial of degree n: .. math:: P(x,y) = c_{00} + c_{10}x + ...+ c_{n0}x^n + c_{01}y + ...+ c_{0n}y^n + c_{11}xy + c_{12}xy^2 + ... + c_{1(n-1)}xy^{n-1}+ ... + c_{(n-1)1}x^{n-1}y For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window`` see :ref:`Notes regarding usage of domain and window <domain-window-note>`. Parameters ---------- degree : int Polynomial degree: largest sum of exponents (:math:`i + j`) of variables in each monomial term of the form :math:`x^i y^j`. The number of terms in a 2D polynomial of degree ``n`` is given by binomial coefficient :math:`C(n + 2, 2) = (n + 2)! / (2!\,n!) = (n + 1)(n + 2) / 2`. 
x_domain : tuple or None, optional domain of the x independent variable If None, it is set to (-1, 1) y_domain : tuple or None, optional domain of the y independent variable If None, it is set to (-1, 1) x_window : tuple or None, optional range of the x independent variable If None, it is set to (-1, 1) Fitters will remap the x_domain to x_window y_window : tuple or None, optional range of the y independent variable If None, it is set to (-1, 1) Fitters will remap the y_domain to y_window **params : dict keyword: value pairs, representing parameter_name: value """ n_inputs = 2 n_outputs = 1 _separable = False def __init__(self, degree, x_domain=None, y_domain=None, x_window=None, y_window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params): super().__init__( degree, n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params) self._default_domain_window = { 'x_domain': (-1, 1), 'y_domain': (-1, 1), 'x_window': (-1, 1), 'y_window': (-1, 1) } self.x_domain = x_domain or self._default_domain_window['x_domain'] self.y_domain = y_domain or self._default_domain_window['y_domain'] self.x_window = x_window or self._default_domain_window['x_window'] self.y_window = y_window or self._default_domain_window['y_window'] def prepare_inputs(self, x, y, **kwargs): inputs, broadcasted_shapes = super().prepare_inputs(x, y, **kwargs) x, y = inputs return (x, y), broadcasted_shapes def evaluate(self, x, y, *coeffs): if self.x_domain is not None: x = poly_map_domain(x, self.x_domain, self.x_window) if self.y_domain is not None: y = poly_map_domain(y, self.y_domain, self.y_window) invcoeff = self.invlex_coeff(coeffs) result = self.multivariate_horner(x, y, invcoeff) # Special case for degree==0 to ensure that the shape of the output is # still as expected by the broadcasting rules, even though the x and y # inputs are not used in the evaluation if self.degree == 0: output_shape = check_broadcast(np.shape(coeffs[0]), x.shape) if output_shape: new_result = np.empty(output_shape) new_result[:] = result result = new_result return result def __repr__(self): return self._format_repr([self.degree], kwargs={'x_domain': self.x_domain, 'y_domain': self.y_domain, 'x_window': self.x_window, 'y_window': self.y_window}, defaults=self._default_domain_window) def __str__(self): return self._format_str([('Degree', self.degree), ('X_Domain', self.x_domain), ('Y_Domain', self.y_domain), ('X_Window', self.x_window), ('Y_Window', self.y_window)], self._default_domain_window) def fit_deriv(self, x, y, *params): """ Computes the Vandermonde matrix. 
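Columns hold the pure powers ``1, x, ..., x**n`` and ``y, ..., y**n``, followed by the mixed monomials ``x**i * y**j`` with ``i, j >= 1`` and ``i + j <= n``.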
Parameters ---------- x : ndarray input y : ndarray input *params throw-away parameter list returned by non-linear fitters Returns ------- result : ndarray The Vandermonde matrix """ if x.ndim == 2: x = x.flatten() if y.ndim == 2: y = y.flatten() if x.size != y.size: raise ValueError('Expected x and y to be of equal size') designx = x[:, None] ** np.arange(self.degree + 1) designy = y[:, None] ** np.arange(1, self.degree + 1) designmixed = [] for i in range(1, self.degree): for j in range(1, self.degree): if i + j <= self.degree: designmixed.append((x ** i) * (y ** j)) designmixed = np.array(designmixed).T if designmixed.any(): v = np.hstack([designx, designy, designmixed]) else: v = np.hstack([designx, designy]) return v def invlex_coeff(self, coeffs): invlex_coeffs = [] lencoeff = range(self.degree + 1) for i in lencoeff: for j in lencoeff: if i + j <= self.degree: name = f'c{j}_{i}' coeff = coeffs[self.param_names.index(name)] invlex_coeffs.append(coeff) return invlex_coeffs[::-1] def multivariate_horner(self, x, y, coeffs): """ Multivariate Horner's scheme Parameters ---------- x, y : array coeffs : array Coefficients in inverse lexical order. """ alpha = self._invlex() r0 = coeffs[0] r1 = r0 * 0.0 r2 = r0 * 0.0 karr = np.diff(alpha, axis=0) for n in range(len(karr)): if karr[n, 1] != 0: r2 = y * (r0 + r1 + r2) r1 = np.zeros_like(coeffs[0], subok=False) else: r1 = x * (r0 + r1) r0 = coeffs[n + 1] return r0 + r1 + r2 @property def input_units(self): if self.degree == 0 or (self.c1_0.unit is None and self.c0_1.unit is None): return None return {self.inputs[0]: self.c0_0.unit / self.c1_0.unit, self.inputs[1]: self.c0_0.unit / self.c0_1.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): mapping = {} for i in range(self.degree + 1): for j in range(self.degree + 1): if i + j > self.degree: continue par = getattr(self, f'c{i}_{j}') mapping[par.name] = (outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]] ** i / inputs_unit[self.inputs[1]] ** j) return mapping @property def x_domain(self): return self._x_domain @x_domain.setter def x_domain(self, val): self._x_domain = _validate_domain_window(val) @property def y_domain(self): return self._y_domain @y_domain.setter def y_domain(self, val): self._y_domain = _validate_domain_window(val) @property def x_window(self): return self._x_window @x_window.setter def x_window(self, val): self._x_window = _validate_domain_window(val) @property def y_window(self): return self._y_window @y_window.setter def y_window(self, val): self._y_window = _validate_domain_window(val) class Chebyshev2D(OrthoPolynomialBase): r""" Bivariate Chebyshev series. It is defined as .. math:: P_{nm}(x,y) = \sum_{n,m=0}^{n=d,m=d}C_{nm} T_n(x) T_m(y) where ``T_n(x)`` and ``T_m(y)`` are Chebyshev polynomials of the first kind. For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window`` see :ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters ---------- x_degree : int degree in x y_degree : int degree in y x_domain : tuple or None, optional domain of the x independent variable y_domain : tuple or None, optional domain of the y independent variable x_window : tuple or None, optional range of the x independent variable If None, it is set to (-1, 1) Fitters will remap the domain to this window y_window : tuple or None, optional range of the y independent variable If None, it is set to (-1, 1) Fitters will remap the domain to this window **params : dict keyword: value pairs, representing parameter_name: value Notes ----- This model does not support the use of units/quantities, because each term in the sum of Chebyshev polynomials is a polynomial in x and/or y - since the coefficients within each Chebyshev polynomial are fixed, we can't use quantities for x and/or y since the units would not be compatible. For example, the third Chebyshev polynomial (T2) is 2x^2-1, but if x was specified with units, 2x^2 and -1 would have incompatible units. """ _separable = False def __init__(self, x_degree, y_degree, x_domain=None, x_window=None, y_domain=None, y_window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params): super().__init__( x_degree, y_degree, x_domain=x_domain, y_domain=y_domain, x_window=x_window, y_window=y_window, n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params) def _fcache(self, x, y): """ Calculate the individual Chebyshev functions once and store them in a dictionary to be reused. """ x_terms = self.x_degree + 1 y_terms = self.y_degree + 1 kfunc = {} kfunc[0] = np.ones(x.shape) kfunc[1] = x.copy() kfunc[x_terms] = np.ones(y.shape) kfunc[x_terms + 1] = y.copy() for n in range(2, x_terms): kfunc[n] = 2 * x * kfunc[n - 1] - kfunc[n - 2] for n in range(x_terms + 2, x_terms + y_terms): kfunc[n] = 2 * y * kfunc[n - 1] - kfunc[n - 2] return kfunc def fit_deriv(self, x, y, *params): """ Derivatives with respect to the coefficients. This is an array with Chebyshev polynomials: .. math:: T_{x_0}T_{y_0}, T_{x_1}T_{y_0}...T_{x_n}T_{y_0}...T_{x_n}T_{y_m} Parameters ---------- x : ndarray input y : ndarray input *params throw-away parameter list returned by non-linear fitters Returns ------- result : ndarray The Vandermonde matrix """ if x.shape != y.shape: raise ValueError("x and y must have the same shape") x = x.flatten() y = y.flatten() x_deriv = self._chebderiv1d(x, self.x_degree + 1).T y_deriv = self._chebderiv1d(y, self.y_degree + 1).T ij = [] for i in range(self.y_degree + 1): for j in range(self.x_degree + 1): ij.append(x_deriv[j] * y_deriv[i]) v = np.array(ij) return v.T def _chebderiv1d(self, x, deg): """ Derivative of 1D Chebyshev series """ x = np.array(x, dtype=float, copy=False, ndmin=1) d = np.empty((deg + 1, len(x)), dtype=x.dtype) d[0] = x * 0 + 1 if deg > 0: x2 = 2 * x d[1] = x for i in range(2, deg + 1): d[i] = d[i - 1] * x2 - d[i - 2] return np.rollaxis(d, 0, d.ndim) class Legendre2D(OrthoPolynomialBase): r""" Bivariate Legendre series. It is defined as: .. math:: P_{nm}(x,y) = \sum_{n,m=0}^{n=d,m=d}C_{nm} L_n(x) L_m(y) where ``L_n(x)`` and ``L_m(y)`` are Legendre polynomials. For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window`` see :ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters ---------- x_degree : int degree in x y_degree : int degree in y x_domain : tuple or None, optional domain of the x independent variable y_domain : tuple or None, optional domain of the y independent variable x_window : tuple or None, optional range of the x independent variable If None, it is set to (-1, 1) Fitters will remap the domain to this window y_window : tuple or None, optional range of the y independent variable If None, it is set to (-1, 1) Fitters will remap the domain to this window **params : dict keyword: value pairs, representing parameter_name: value Notes ----- Model formula: .. math:: P(x) = \sum_{i=0}^{i=n}C_{i} * L_{i}(x) where ``L_{i}`` is the corresponding Legendre polynomial. This model does not support the use of units/quantities, because each term in the sum of Legendre polynomials is a polynomial in x - since the coefficients within each Legendre polynomial are fixed, we can't use quantities for x since the units would not be compatible. For example, the third Legendre polynomial (P2) is 1.5x^2-0.5, but if x was specified with units, 1.5x^2 and -0.5 would have incompatible units. """ _separable = False def __init__(self, x_degree, y_degree, x_domain=None, x_window=None, y_domain=None, y_window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params): super().__init__( x_degree, y_degree, x_domain=x_domain, y_domain=y_domain, x_window=x_window, y_window=y_window, n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params) def _fcache(self, x, y): """ Calculate the individual Legendre functions once and store them in a dictionary to be reused. """ x_terms = self.x_degree + 1 y_terms = self.y_degree + 1 kfunc = {} kfunc[0] = np.ones(x.shape) kfunc[1] = x.copy() kfunc[x_terms] = np.ones(y.shape) kfunc[x_terms + 1] = y.copy() for n in range(2, x_terms): kfunc[n] = (((2 * (n - 1) + 1) * x * kfunc[n - 1] - (n - 1) * kfunc[n - 2]) / n) for n in range(2, y_terms): kfunc[n + x_terms] = ((2 * (n - 1) + 1) * y * kfunc[n + x_terms - 1] - (n - 1) * kfunc[n + x_terms - 2]) / (n) return kfunc def fit_deriv(self, x, y, *params): """ Derivatives with respect to the coefficients. This is an array with Legendre polynomials: Lx0Ly0 Lx1Ly0...LxnLy0...LxnLym Parameters ---------- x : ndarray input y : ndarray input *params throw-away parameter list returned by non-linear fitters Returns ------- result : ndarray The Vandermonde matrix """ if x.shape != y.shape: raise ValueError("x and y must have the same shape") x = x.flatten() y = y.flatten() x_deriv = self._legendderiv1d(x, self.x_degree + 1).T y_deriv = self._legendderiv1d(y, self.y_degree + 1).T ij = [] for i in range(self.y_degree + 1): for j in range(self.x_degree + 1): ij.append(x_deriv[j] * y_deriv[i]) v = np.array(ij) return v.T def _legendderiv1d(self, x, deg): """Derivative of 1D Legendre polynomial""" x = np.array(x, dtype=float, copy=False, ndmin=1) d = np.empty((deg + 1,) + x.shape, dtype=x.dtype) d[0] = x * 0 + 1 if deg > 0: d[1] = x for i in range(2, deg + 1): d[i] = (d[i - 1] * x * (2 * i - 1) - d[i - 2] * (i - 1)) / i return np.rollaxis(d, 0, d.ndim) class _SIP1D(PolynomialBase): """ This implements the Simple Imaging Polynomial Model (SIP) in 1D. It's unlikely it will be used in 1D so this class is private and SIP should be used instead. 
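Only terms of total order 2 through ``order`` are evaluated; in the SIP convention the constant and linear terms are carried by the FITS WCS linear transformation.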
""" n_inputs = 2 n_outputs = 1 _separable = False def __init__(self, order, coeff_prefix, n_models=None, model_set_axis=None, name=None, meta=None, **params): self.order = order self.coeff_prefix = coeff_prefix self._param_names = self._generate_coeff_names(coeff_prefix) if n_models: if model_set_axis is None: model_set_axis = 0 minshape = (1,) * model_set_axis + (n_models,) else: minshape = () for param_name in self._param_names: self._parameters_[param_name] = Parameter(param_name, default=np.zeros(minshape)) super().__init__(n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params) def __repr__(self): return self._format_repr(args=[self.order, self.coeff_prefix]) def __str__(self): return self._format_str( [('Order', self.order), ('Coeff. Prefix', self.coeff_prefix)]) def evaluate(self, x, y, *coeffs): # TODO: Rewrite this so that it uses a simpler method of determining # the matrix based on the number of given coefficients. mcoef = self._coeff_matrix(self.coeff_prefix, coeffs) return self._eval_sip(x, y, mcoef) def get_num_coeff(self, ndim): """ Return the number of coefficients in one param set """ if self.order < 2 or self.order > 9: raise ValueError("Degree of polynomial must be 2< deg < 9") nmixed = comb(self.order, ndim) # remove 3 terms because SIP deg >= 2 numc = self.order * ndim + nmixed - 2 return numc def _generate_coeff_names(self, coeff_prefix): names = [] for i in range(2, self.order + 1): names.append(f'{coeff_prefix}_{i}_{0}') for i in range(2, self.order + 1): names.append(f'{coeff_prefix}_{0}_{i}') for i in range(1, self.order): for j in range(1, self.order): if i + j < self.order + 1: names.append(f'{coeff_prefix}_{i}_{j}') return tuple(names) def _coeff_matrix(self, coeff_prefix, coeffs): mat = np.zeros((self.order + 1, self.order + 1)) for i in range(2, self.order + 1): attr = f'{coeff_prefix}_{i}_{0}' mat[i, 0] = coeffs[self.param_names.index(attr)] for i in range(2, self.order + 1): attr = f'{coeff_prefix}_{0}_{i}' mat[0, i] = coeffs[self.param_names.index(attr)] for i in range(1, self.order): for j in range(1, self.order): if i + j < self.order + 1: attr = f'{coeff_prefix}_{i}_{j}' mat[i, j] = coeffs[self.param_names.index(attr)] return mat def _eval_sip(self, x, y, coef): x = np.asarray(x, dtype=np.float64) y = np.asarray(y, dtype=np.float64) if self.coeff_prefix == 'A': result = np.zeros(x.shape) else: result = np.zeros(y.shape) for i in range(coef.shape[0]): for j in range(coef.shape[1]): if 1 < i + j < self.order + 1: result = result + coef[i, j] * x ** i * y ** j return result class SIP(Model): """ Simple Imaging Polynomial (SIP) model. The SIP convention is used to represent distortions in FITS image headers. See [1]_ for a description of the SIP convention. Parameters ---------- crpix : list or (2,) ndarray CRPIX values a_order : int SIP polynomial order for first axis b_order : int SIP order for second axis a_coeff : dict SIP coefficients for first axis b_coeff : dict SIP coefficients for the second axis ap_order : int order for the inverse transformation (AP coefficients) bp_order : int order for the inverse transformation (BP coefficients) ap_coeff : dict coefficients for the inverse transform bp_coeff : dict coefficients for the inverse transform References ---------- .. [1] `David Shupe, et al, ADASS, ASP Conference Series, Vol. 
347, 2005 <https://ui.adsabs.harvard.edu/abs/2005ASPC..347..491S>`_ """ n_inputs = 2 n_outputs = 2 _separable = False def __init__(self, crpix, a_order, b_order, a_coeff={}, b_coeff={}, ap_order=None, bp_order=None, ap_coeff={}, bp_coeff={}, n_models=None, model_set_axis=None, name=None, meta=None): self._crpix = crpix self._a_order = a_order self._b_order = b_order self._a_coeff = a_coeff self._b_coeff = b_coeff self._ap_order = ap_order self._bp_order = bp_order self._ap_coeff = ap_coeff self._bp_coeff = bp_coeff self.shift_a = Shift(-crpix[0]) self.shift_b = Shift(-crpix[1]) self.sip1d_a = _SIP1D(a_order, coeff_prefix='A', n_models=n_models, model_set_axis=model_set_axis, **a_coeff) self.sip1d_b = _SIP1D(b_order, coeff_prefix='B', n_models=n_models, model_set_axis=model_set_axis, **b_coeff) super().__init__(n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta) self._inputs = ("u", "v") self._outputs = ("x", "y") def __repr__(self): return (f"<{self.__class__.__name__}" f"({[self.shift_a, self.shift_b, self.sip1d_a, self.sip1d_b]!r})>") def __str__(self): parts = [f'Model: {self.__class__.__name__}'] for model in [self.shift_a, self.shift_b, self.sip1d_a, self.sip1d_b]: parts.append(indent(str(model), width=4)) parts.append('') return '\n'.join(parts) @property def inverse(self): if (self._ap_order is not None and self._bp_order is not None): return InverseSIP(self._ap_order, self._bp_order, self._ap_coeff, self._bp_coeff) else: raise NotImplementedError("SIP inverse coefficients are not available.") def evaluate(self, x, y): u = self.shift_a.evaluate(x, *self.shift_a.param_sets) v = self.shift_b.evaluate(y, *self.shift_b.param_sets) f = self.sip1d_a.evaluate(u, v, *self.sip1d_a.param_sets) g = self.sip1d_b.evaluate(u, v, *self.sip1d_b.param_sets) return f, g class InverseSIP(Model): """ Inverse Simple Imaging Polynomial Parameters ---------- ap_order : int order for the inverse transformation (AP coefficients) bp_order : int order for the inverse transformation (BP coefficients) ap_coeff : dict coefficients for the inverse transform bp_coeff : dict coefficients for the inverse transform """ n_inputs = 2 n_outputs = 2 _separable = False def __init__(self, ap_order, bp_order, ap_coeff={}, bp_coeff={}, n_models=None, model_set_axis=None, name=None, meta=None): self._ap_order = ap_order self._bp_order = bp_order self._ap_coeff = ap_coeff self._bp_coeff = bp_coeff # define the 0th term in order to use Polynomial2D ap_coeff.setdefault('AP_0_0', 0) bp_coeff.setdefault('BP_0_0', 0) ap_coeff_params = dict((k.replace('AP_', 'c'), v) for k, v in ap_coeff.items()) bp_coeff_params = dict((k.replace('BP_', 'c'), v) for k, v in bp_coeff.items()) self.sip1d_ap = Polynomial2D(degree=ap_order, model_set_axis=model_set_axis, **ap_coeff_params) self.sip1d_bp = Polynomial2D(degree=bp_order, model_set_axis=model_set_axis, **bp_coeff_params) super().__init__(n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta) def __repr__(self): return f'<{self.__class__.__name__}({[self.sip1d_ap, self.sip1d_bp]!r})>' def __str__(self): parts = [f'Model: {self.__class__.__name__}'] for model in [self.sip1d_ap, self.sip1d_bp]: parts.append(indent(str(model), width=4)) parts.append('') return '\n'.join(parts) def evaluate(self, x, y): x1 = self.sip1d_ap.evaluate(x, y, *self.sip1d_ap.param_sets) y1 = self.sip1d_bp.evaluate(x, y, *self.sip1d_bp.param_sets) return x1, y1
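# Illustrative usage sketch (an assumption, not part of astropy): a small,
# never-called helper showing how the SIP model defined above is constructed
# and evaluated. The function name and all coefficient values are invented
# for demonstration only.
def _example_sip_usage():  # pragma: no cover
    crpix = [1024.0, 1024.0]
    a_coeff = {'A_2_0': 2.0e-6, 'A_0_2': 1.5e-6}
    b_coeff = {'B_2_0': 1.0e-6, 'B_0_2': 2.5e-6}
    sip = SIP(crpix, a_order=2, b_order=2, a_coeff=a_coeff, b_coeff=b_coeff)
    # The forward transform returns the distortion corrections (f, g) at a
    # given pixel position; the inputs are shifted internally by -crpix
    # before the A and B polynomials are evaluated.
    return sip(1100.0, 900.0)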
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Tabular models. Tabular models of any dimension can be created using `tabular_model`. For convenience `Tabular1D` and `Tabular2D` are provided. Examples -------- >>> table = np.array([[ 3., 0., 0.], ... [ 0., 2., 0.], ... [ 0., 0., 0.]]) >>> points = ([1, 2, 3], [1, 2, 3]) >>> t2 = Tabular2D(points, lookup_table=table, bounds_error=False, ... fill_value=None, method='nearest') """ # pylint: disable=invalid-name import numpy as np from astropy import units as u from .core import Model try: from scipy.interpolate import interpn has_scipy = True except ImportError: has_scipy = False __all__ = ['tabular_model', 'Tabular1D', 'Tabular2D'] __doctest_requires__ = {('tabular_model'): ['scipy']} class _Tabular(Model): """ Returns an interpolated lookup table value. Parameters ---------- points : tuple of ndarray of float, optional The points defining the regular grid in n dimensions. ndarray must have shapes (m1, ), ..., (mn, ), lookup_table : array-like The data on a regular grid in n dimensions. Must have shapes (m1, ..., mn, ...) method : str, optional The method of interpolation to perform. Supported are "linear" and "nearest", and "splinef2d". "splinef2d" is only supported for 2-dimensional data. Default is "linear". bounds_error : bool, optional If True, when interpolated values are requested outside of the domain of the input data, a ValueError is raised. If False, then ``fill_value`` is used. fill_value : float or `~astropy.units.Quantity`, optional If provided, the value to use for points outside of the interpolation domain. If None, values outside the domain are extrapolated. Extrapolation is not supported by method "splinef2d". If Quantity is given, it will be converted to the unit of ``lookup_table``, if applicable. Returns ------- value : ndarray Interpolated values at input coordinates. Raises ------ ImportError Scipy is not installed. Notes ----- Uses `scipy.interpolate.interpn`. 
""" linear = False fittable = False standard_broadcasting = False _is_dynamic = True _id = 0 def __init__(self, points=None, lookup_table=None, method='linear', bounds_error=True, fill_value=np.nan, **kwargs): n_models = kwargs.get('n_models', 1) if n_models > 1: raise NotImplementedError('Only n_models=1 is supported.') super().__init__(**kwargs) self.outputs = ("y",) if lookup_table is None: raise ValueError('Must provide a lookup table.') if not isinstance(lookup_table, u.Quantity): lookup_table = np.asarray(lookup_table) if self.lookup_table.ndim != lookup_table.ndim: raise ValueError("lookup_table should be an array with " f"{self.lookup_table.ndim} dimensions.") if points is None: points = tuple(np.arange(x, dtype=float) for x in lookup_table.shape) else: if lookup_table.ndim == 1 and not isinstance(points, tuple): points = (points,) npts = len(points) if npts != lookup_table.ndim: raise ValueError( "Expected grid points in " f"{lookup_table.ndim} directions, got {npts}.") if (npts > 1 and isinstance(points[0], u.Quantity) and len(set([getattr(p, 'unit', None) for p in points])) > 1): raise ValueError('points must all have the same unit.') if isinstance(fill_value, u.Quantity): if not isinstance(lookup_table, u.Quantity): raise ValueError(f"fill value is in {fill_value.unit} but expected to be " "unitless.") fill_value = fill_value.to(lookup_table.unit).value self.points = points self.lookup_table = lookup_table self.bounds_error = bounds_error self.method = method self.fill_value = fill_value def __repr__(self): return (f"<{self.__class__.__name__}(points={self.points}, " f"lookup_table={self.lookup_table})>") def __str__(self): default_keywords = [ ('Model', self.__class__.__name__), ('Name', self.name), ('N_inputs', self.n_inputs), ('N_outputs', self.n_outputs), ('Parameters', ""), (' points', self.points), (' lookup_table', self.lookup_table), (' method', self.method), (' fill_value', self.fill_value), (' bounds_error', self.bounds_error) ] parts = [f'{keyword}: {value}' for keyword, value in default_keywords if value is not None] return '\n'.join(parts) @property def input_units(self): pts = self.points[0] if not isinstance(pts, u.Quantity): return None return dict([(x, pts.unit) for x in self.inputs]) @property def return_units(self): if not isinstance(self.lookup_table, u.Quantity): return None return {self.outputs[0]: self.lookup_table.unit} @property def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits, ``(points_low, points_high)``. Examples -------- >>> from astropy.modeling.models import Tabular1D, Tabular2D >>> t1 = Tabular1D(points=[1, 2, 3], lookup_table=[10, 20, 30]) >>> t1.bounding_box ModelBoundingBox( intervals={ x: Interval(lower=1, upper=3) } model=Tabular1D(inputs=('x',)) order='C' ) >>> t2 = Tabular2D(points=[[1, 2, 3], [2, 3, 4]], ... lookup_table=[[10, 20, 30], [20, 30, 40]]) >>> t2.bounding_box ModelBoundingBox( intervals={ x: Interval(lower=1, upper=3) y: Interval(lower=2, upper=4) } model=Tabular2D(inputs=('x', 'y')) order='C' ) """ bbox = [(min(p), max(p)) for p in self.points][::-1] if len(bbox) == 1: bbox = bbox[0] return bbox def evaluate(self, *inputs): """ Return the interpolated values at the input coordinates. Parameters ---------- inputs : list of scalar or list of ndarray Input coordinates. The number of inputs must be equal to the dimensions of the lookup table. 
""" inputs = np.broadcast_arrays(*inputs) shape = inputs[0].shape inputs = [inp.flatten() for inp in inputs[: self.n_inputs]] inputs = np.array(inputs).T if not has_scipy: # pragma: no cover raise ImportError("Tabular model requires scipy.") result = interpn(self.points, self.lookup_table, inputs, method=self.method, bounds_error=self.bounds_error, fill_value=self.fill_value) # return_units not respected when points has no units if (isinstance(self.lookup_table, u.Quantity) and not isinstance(self.points[0], u.Quantity)): result = result * self.lookup_table.unit if self.n_outputs == 1: result = result.reshape(shape) else: result = [r.reshape(shape) for r in result] return result @property def inverse(self): if self.n_inputs == 1: # If the wavelength array is descending instead of ascending, both # points and lookup_table need to be reversed in the inverse transform # for scipy.interpolate to work properly if np.all(np.diff(self.lookup_table) > 0): # ascending case points = self.lookup_table lookup_table = self.points[0] elif np.all(np.diff(self.lookup_table) < 0): # descending case, reverse order points = self.lookup_table[::-1] lookup_table = self.points[0][::-1] else: # equal-valued or double-valued lookup_table raise NotImplementedError return Tabular1D(points=points, lookup_table=lookup_table, method=self.method, bounds_error=self.bounds_error, fill_value=self.fill_value) raise NotImplementedError("An analytical inverse transform " "has not been implemented for this model.") def tabular_model(dim, name=None): """ Make a ``Tabular`` model where ``n_inputs`` is based on the dimension of the lookup_table. This model has to be further initialized and when evaluated returns the interpolated values. Parameters ---------- dim : int Dimensions of the lookup table. name : str Name for the class. Examples -------- >>> table = np.array([[3., 0., 0.], ... [0., 2., 0.], ... [0., 0., 0.]]) >>> tab = tabular_model(2, name='Tabular2D') >>> print(tab) <class 'astropy.modeling.tabular.Tabular2D'> Name: Tabular2D N_inputs: 2 N_outputs: 1 >>> points = ([1, 2, 3], [1, 2, 3]) Setting fill_value to None, allows extrapolation. >>> m = tab(points, lookup_table=table, name='my_table', ... bounds_error=False, fill_value=None, method='nearest') >>> xinterp = [0, 1, 1.5, 2.72, 3.14] >>> m(xinterp, xinterp) # doctest: +FLOAT_CMP array([3., 3., 3., 0., 0.]) """ if dim < 1: raise ValueError('Lookup table must have at least one dimension.') table = np.zeros([2] * dim) members = {'lookup_table': table, 'n_inputs': dim, 'n_outputs': 1} if dim == 1: members['_separable'] = True else: members['_separable'] = False if name is None: model_id = _Tabular._id _Tabular._id += 1 name = f'Tabular{model_id}' model_class = type(str(name), (_Tabular,), members) model_class.__module__ = 'astropy.modeling.tabular' return model_class Tabular1D = tabular_model(1, name='Tabular1D') Tabular2D = tabular_model(2, name='Tabular2D') _tab_docs = """ method : str, optional The method of interpolation to perform. Supported are "linear" and "nearest", and "splinef2d". "splinef2d" is only supported for 2-dimensional data. Default is "linear". bounds_error : bool, optional If True, when interpolated values are requested outside of the domain of the input data, a ValueError is raised. If False, then ``fill_value`` is used. fill_value : float, optional If provided, the value to use for points outside of the interpolation domain. If None, values outside the domain are extrapolated. Extrapolation is not supported by method "splinef2d". 
Returns ------- value : ndarray Interpolated values at input coordinates. Raises ------ ImportError Scipy is not installed. Notes ----- Uses `scipy.interpolate.interpn`. """ Tabular1D.__doc__ = """ Tabular model in 1D. Returns an interpolated lookup table value. Parameters ---------- points : array-like of float of ndim=1. The points defining the regular grid in n dimensions. lookup_table : array-like, of ndim=1. The data in one dimensions. """ + _tab_docs Tabular2D.__doc__ = """ Tabular model in 2D. Returns an interpolated lookup table value. Parameters ---------- points : tuple of ndarray of float, optional The points defining the regular grid in n dimensions. ndarray with shapes (m1, m2). lookup_table : array-like The data on a regular grid in 2 dimensions. Shape (m1, m2). """ + _tab_docs
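# Illustrative usage sketch (an assumption, not part of astropy): a small,
# never-called helper showing Tabular1D and its analytic inverse. Requires
# scipy; the table values are invented for demonstration only.
def _example_tabular_usage():  # pragma: no cover
    points = np.array([1.0, 2.0, 3.0, 4.0])
    lookup = np.array([10.0, 20.0, 30.0, 40.0])
    t = Tabular1D(points=points, lookup_table=lookup)
    forward = t(2.5)            # linear interpolation -> 25.0
    # The inverse simply swaps points and lookup_table, which is valid here
    # because the lookup_table is strictly increasing.
    backward = t.inverse(25.0)  # -> 2.5
    return forward, backward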
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Models that have physical origins. """ # pylint: disable=invalid-name, no-member import warnings import numpy as np from astropy import constants as const from astropy import units as u from astropy.utils.exceptions import AstropyUserWarning from .core import Fittable1DModel from .parameters import InputParameterError, Parameter __all__ = ["BlackBody", "Drude1D", "Plummer1D", "NFW"] class BlackBody(Fittable1DModel): """ Blackbody model using the Planck function. Parameters ---------- temperature : `~astropy.units.Quantity` ['temperature'] Blackbody temperature. scale : float or `~astropy.units.Quantity` ['dimensionless'] Scale factor. If dimensionless, input units will be assumed to be Hz and output units will be erg / (cm ** 2 * s * Hz * sr). If not dimensionless, must be equivalent to either erg / (cm ** 2 * s * Hz * sr) or erg / (cm ** 2 * s * AA * sr), in which case the result will be returned in the requested units and the scale will be stripped of units (with the float value applied). Notes ----- Model formula: .. math:: B_{\\nu}(T) = A \\frac{2 h \\nu^{3} / c^{2}}{\\exp(h \\nu / k T) - 1} Examples -------- >>> from astropy.modeling import models >>> from astropy import units as u >>> bb = models.BlackBody(temperature=5000*u.K) >>> bb(6000 * u.AA) # doctest: +FLOAT_CMP <Quantity 1.53254685e-05 erg / (cm2 Hz s sr)> .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import BlackBody from astropy import units as u from astropy.visualization import quantity_support bb = BlackBody(temperature=5778*u.K) wav = np.arange(1000, 110000) * u.AA flux = bb(wav) with quantity_support(): plt.figure() plt.semilogx(wav, flux) plt.axvline(bb.nu_max.to(u.AA, equivalencies=u.spectral()).value, ls='--') plt.show() """ # We parametrize this model with a temperature and a scale. temperature = Parameter(default=5000.0, min=0, unit=u.K, description="Blackbody temperature") scale = Parameter(default=1.0, min=0, description="Scale factor") # We allow values without units to be passed when evaluating the model, and # in this case the input x values are assumed to be frequencies in Hz or wavelengths # in AA (depending on the choice of output units controlled by units on scale # and stored in self._output_units during init). _input_units_allow_dimensionless = True # We enable the spectral equivalency by default for the spectral axis input_units_equivalencies = {'x': u.spectral()} # Store the native units returned by the B_nu equation _native_units = u.erg / (u.cm ** 2 * u.s * u.Hz * u.sr) # Store the base native output units. If scale is not dimensionless, it # must be equivalent to one of these. If equivalent to SLAM, then # input_units will expect AA for 'x', otherwise Hz. _native_output_units = {'SNU': u.erg / (u.cm ** 2 * u.s * u.Hz * u.sr), 'SLAM': u.erg / (u.cm ** 2 * u.s * u.AA * u.sr)} def __init__(self, *args, **kwargs): scale = kwargs.get('scale', None) # Support scale with a non-dimensionless unit by stripping the unit and # storing it as self._output_units.
if hasattr(scale, 'unit') and not scale.unit.is_equivalent(u.dimensionless_unscaled): output_units = scale.unit if not output_units.is_equivalent(self._native_units, u.spectral_density(1*u.AA)): raise ValueError("scale units not dimensionless or in " f"surface brightness: {output_units}") kwargs['scale'] = scale.value self._output_units = output_units else: self._output_units = self._native_units return super().__init__(*args, **kwargs) def evaluate(self, x, temperature, scale): """Evaluate the model. Parameters ---------- x : float, `~numpy.ndarray`, or `~astropy.units.Quantity` ['frequency'] Frequency at which to compute the blackbody. If no units are given, this defaults to Hz (or AA if `scale` was initialized with units equivalent to erg / (cm ** 2 * s * AA * sr)). temperature : float, `~numpy.ndarray`, or `~astropy.units.Quantity` Temperature of the blackbody. If no units are given, this defaults to Kelvin. scale : float, `~numpy.ndarray`, or `~astropy.units.Quantity` ['dimensionless'] Desired scale for the blackbody. Returns ------- y : number or ndarray Blackbody spectrum. The units are determined from the units of ``scale``. .. note:: Use `numpy.errstate` to suppress Numpy warnings, if desired. .. warning:: Output values might contain ``nan`` and ``inf``. Raises ------ ValueError Invalid temperature. ZeroDivisionError Wavelength is zero (when converting to frequency). """ if not isinstance(temperature, u.Quantity): in_temp = u.Quantity(temperature, u.K) else: in_temp = temperature if not isinstance(x, u.Quantity): # then we assume it has input_units which depends on the # requested output units (either Hz or AA) in_x = u.Quantity(x, self.input_units['x']) else: in_x = x # Convert to units for calculations, also force double precision with u.add_enabled_equivalencies(u.spectral() + u.temperature()): freq = u.Quantity(in_x, u.Hz, dtype=np.float64) temp = u.Quantity(in_temp, u.K) # Check if input values are physically possible if np.any(temp < 0): raise ValueError(f"Temperature should be positive: {temp}") if not np.all(np.isfinite(freq)) or np.any(freq <= 0): warnings.warn( "Input contains invalid wavelength/frequency value(s)", AstropyUserWarning, ) log_boltz = const.h * freq / (const.k_B * temp) boltzm1 = np.expm1(log_boltz) # Calculate blackbody flux bb_nu = 2.0 * const.h * freq ** 3 / (const.c ** 2 * boltzm1) / u.sr if self.scale.unit is not None: # Will be dimensionless at this point, but may not be dimensionless_unscaled if not hasattr(scale, 'unit'): # during fitting, scale will be passed without units # but we still need to convert from the input dimensionless # to dimensionless unscaled scale = scale * self.scale.unit scale = scale.to(u.dimensionless_unscaled).value # NOTE: scale is already stripped of any input units y = scale * bb_nu.to(self._output_units, u.spectral_density(freq)) # If the temperature parameter has no unit, we should return a unitless # value. This occurs for instance during fitting, since we drop the # units temporarily. if hasattr(temperature, "unit"): return y return y.value @property def input_units(self): # The input units are those of the 'x' value, which will depend on the # units compatible with the expected output units. 
if self._output_units.is_equivalent(self._native_output_units['SNU']): return {self.inputs[0]: u.Hz} else: # only other option is equivalent with SLAM return {self.inputs[0]: u.AA} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {"temperature": u.K} @property def bolometric_flux(self): """Bolometric flux.""" if self.scale.unit is not None: # Will be dimensionless at this point, but may not be dimensionless_unscaled scale = self.scale.quantity.to(u.dimensionless_unscaled) else: scale = self.scale.value # bolometric flux in the native units of the planck function native_bolflux = ( scale * const.sigma_sb * self.temperature ** 4 / np.pi ) # return in more "astro" units return native_bolflux.to(u.erg / (u.cm ** 2 * u.s)) @property def lambda_max(self): """Peak wavelength when the curve is expressed as power density.""" return const.b_wien / self.temperature @property def nu_max(self): """Peak frequency when the curve is expressed as power density.""" return 2.8214391 * const.k_B * self.temperature / const.h class Drude1D(Fittable1DModel): """ Drude model based on the behavior of electrons in materials (especially metals). Parameters ---------- amplitude : float Peak value x_0 : float Position of the peak fwhm : float Full width at half maximum Notes ----- Model formula: .. math:: f(x) = A \\frac{(fwhm/x_0)^2}{(x/x_0 - x_0/x)^2 + (fwhm/x_0)^2} Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Drude1D fig, ax = plt.subplots() # generate the curves and plot them x = np.arange(7.5, 12.5, 0.1) dmodel = Drude1D(amplitude=1.0, fwhm=1.0, x_0=10.0) ax.plot(x, dmodel(x)) ax.set_xlabel('x') ax.set_ylabel('F(x)') plt.show() """ amplitude = Parameter(default=1.0, description="Peak value") x_0 = Parameter(default=1.0, description="Position of the peak") fwhm = Parameter(default=1.0, description="Full width at half maximum") @staticmethod def evaluate(x, amplitude, x_0, fwhm): """ One dimensional Drude model function """ return ( amplitude * ((fwhm / x_0) ** 2) / ((x / x_0 - x_0 / x) ** 2 + (fwhm / x_0) ** 2) ) @staticmethod def fit_deriv(x, amplitude, x_0, fwhm): """ Drude1D model function derivatives. """ d_amplitude = (fwhm / x_0) ** 2 / ((x / x_0 - x_0 / x) ** 2 + (fwhm / x_0) ** 2) d_x_0 = ( -2 * amplitude * d_amplitude * ( (1 / x_0) + d_amplitude * (x_0 ** 2 / fwhm ** 2) * ( (-x / x_0 - 1 / x) * (x / x_0 - x_0 / x) - (2 * fwhm ** 2 / x_0 ** 3) ) ) ) d_fwhm = (2 * amplitude * d_amplitude / fwhm) * (1 - d_amplitude) return [d_amplitude, d_x_0, d_fwhm] @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "x_0": inputs_unit[self.inputs[0]], "fwhm": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } @property def return_units(self): if self.amplitude.unit is None: return None return {self.outputs[0]: self.amplitude.unit} @x_0.validator def x_0(self, val): """ Ensure `x_0` is not 0.""" if np.any(val == 0): raise InputParameterError("0 is not an allowed value for x_0") def bounding_box(self, factor=50): """Tuple defining the default ``bounding_box`` limits, ``(x_low, x_high)``. Parameters ---------- factor : float The multiple of FWHM used to define the limits. """ x0 = self.x_0 dx = factor * self.fwhm return (x0 - dx, x0 + dx) class Plummer1D(Fittable1DModel): r"""One dimensional Plummer density profile model.
Parameters ---------- mass : float Total mass of cluster. r_plum : float Scale parameter which sets the size of the cluster core. Notes ----- Model formula: .. math:: \rho(r)=\frac{3M}{4\pi a^3}(1+\frac{r^2}{a^2})^{-5/2} References ---------- .. [1] https://ui.adsabs.harvard.edu/abs/1911MNRAS..71..460P """ mass = Parameter(default=1.0, description="Total mass of cluster") r_plum = Parameter(default=1.0, description="Scale parameter which sets the size of the cluster core") @staticmethod def evaluate(x, mass, r_plum): """ Evaluate plummer density profile model. """ return (3*mass)/(4 * np.pi * r_plum**3) * (1+(x/r_plum)**2)**(-5/2) @staticmethod def fit_deriv(x, mass, r_plum): """ Plummer1D model derivatives. """ d_mass = 3 / ((4*np.pi*r_plum**3) * (((x/r_plum)**2 + 1)**(5/2))) d_r_plum = (6*mass*x**2-9*mass*r_plum**2) / ((4*np.pi * r_plum**6) * (1+(x/r_plum)**2)**(7/2)) return [d_mass, d_r_plum] @property def input_units(self): if self.mass.unit is None and self.r_plum.unit is None: return None else: return {self.inputs[0]: self.r_plum.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'mass': outputs_unit[self.outputs[0]] * inputs_unit[self.inputs[0]] ** 3, 'r_plum': inputs_unit[self.inputs[0]]} class NFW(Fittable1DModel): r""" Navarro–Frenk–White (NFW) profile - model for radial distribution of dark matter. Parameters ---------- mass : float or `~astropy.units.Quantity` ['mass'] Mass of NFW peak within specified overdensity radius. concentration : float Concentration of the NFW profile. redshift : float Redshift of the NFW profile. massfactor : tuple or str Mass overdensity factor and type for provided profiles: Tuple version: ("virial",) : virial radius ("critical", N) : radius where density is N times that of the critical density ("mean", N) : radius where density is N times that of the mean density String version: "virial" : virial radius "Nc" : radius where density is N times that of the critical density (e.g. "200c") "Nm" : radius where density is N times that of the mean density (e.g. "500m") cosmo : :class:`~astropy.cosmology.Cosmology` Background cosmology for density calculation. If None, the default cosmology will be used. Notes ----- Model formula: .. math:: \rho(r)=\frac{\delta_c\rho_{c}}{r/r_s(1+r/r_s)^2} References ---------- .. [1] https://arxiv.org/pdf/astro-ph/9508025 .. [2] https://en.wikipedia.org/wiki/Navarro%E2%80%93Frenk%E2%80%93White_profile .. [3] https://en.wikipedia.org/wiki/Virial_mass """ # Model Parameters # NFW Profile mass mass = Parameter(default=1.0, min=1.0, unit=u.M_sun, description="Peak mass within specified overdensity radius") # NFW profile concentration concentration = Parameter(default=1.0, min=1.0, description="Concentration") # NFW Profile redshift redshift = Parameter(default=0.0, min=0.0, description="Redshift") # We allow values without units to be passed when evaluating the model, and # in this case the input r values are assumed to be lengths / positions in kpc. 
_input_units_allow_dimensionless = True def __init__(self, mass=u.Quantity(mass.default, mass.unit), concentration=concentration.default, redshift=redshift.default, massfactor=("critical", 200), cosmo=None, **kwargs): # Set default cosmology if cosmo is None: # LOCAL from astropy.cosmology import default_cosmology cosmo = default_cosmology.get() # Set mass overdensity type and factor self._density_delta(massfactor, cosmo, redshift) # Establish mass units for density calculation (default solar masses) if not isinstance(mass, u.Quantity): in_mass = u.Quantity(mass, u.M_sun) else: in_mass = mass # Obtain scale radius self._radius_s(mass, concentration) # Obtain scale density self._density_s(mass, concentration) super().__init__(mass=in_mass, concentration=concentration, redshift=redshift, **kwargs) def evaluate(self, r, mass, concentration, redshift): """ One dimensional NFW profile function Parameters ---------- r : float or `~astropy.units.Quantity` ['length'] Radial position of density to be calculated for the NFW profile. mass : float or `~astropy.units.Quantity` ['mass'] Mass of NFW peak within specified overdensity radius. concentration : float Concentration of the NFW profile. redshift : float Redshift of the NFW profile. Returns ------- density : float or `~astropy.units.Quantity` ['density'] NFW profile mass density at location ``r``. The density units are: [``mass`` / ``r`` ^3] Notes ----- .. warning:: Output values might contain ``nan`` and ``inf``. """ # Create radial version of input with dimension if hasattr(r, "unit"): in_r = r else: in_r = u.Quantity(r, u.kpc) # Define reduced radius (r / r_{\\rm s}) # also update scale radius radius_reduced = in_r / self._radius_s(mass, concentration).to(in_r.unit) # Density distribution # \rho (r)=\frac{\rho_0}{\frac{r}{R_s}\left(1~+~\frac{r}{R_s}\right)^2} # also update scale density density = self._density_s(mass, concentration) / (radius_reduced * (u.Quantity(1.0) + radius_reduced) ** 2) if hasattr(mass, "unit"): return density else: return density.value def _density_delta(self, massfactor, cosmo, redshift): """ Calculate density delta. 
""" # Set mass overdensity type and factor if isinstance(massfactor, tuple): # Tuple options # ("virial") : virial radius # ("critical", N) : radius where density is N that of the critical density # ("mean", N) : radius where density is N that of the mean density if massfactor[0].lower() == "virial": # Virial Mass delta = None masstype = massfactor[0].lower() elif massfactor[0].lower() == "critical": # Critical or Mean Overdensity Mass delta = float(massfactor[1]) masstype = 'c' elif massfactor[0].lower() == "mean": # Critical or Mean Overdensity Mass delta = float(massfactor[1]) masstype = 'm' else: raise ValueError("Massfactor '" + str(massfactor[0]) + "' not one of 'critical', " "'mean', or 'virial'") else: try: # String options # virial : virial radius # Nc : radius where density is N that of the critical density # Nm : radius where density is N that of the mean density if massfactor.lower() == "virial": # Virial Mass delta = None masstype = massfactor.lower() elif massfactor[-1].lower() == 'c' or massfactor[-1].lower() == 'm': # Critical or Mean Overdensity Mass delta = float(massfactor[0:-1]) masstype = massfactor[-1].lower() else: raise ValueError("Massfactor " + str(massfactor) + " string not of the form " "'#m', '#c', or 'virial'") except (AttributeError, TypeError): raise TypeError("Massfactor " + str( massfactor) + " not a tuple or string") # Set density from masstype specification if masstype == "virial": Om_c = cosmo.Om(redshift) - 1.0 d_c = 18.0 * np.pi ** 2 + 82.0 * Om_c - 39.0 * Om_c ** 2 self.density_delta = d_c * cosmo.critical_density(redshift) elif masstype == 'c': self.density_delta = delta * cosmo.critical_density(redshift) elif masstype == 'm': self.density_delta = delta * cosmo.critical_density(redshift) * cosmo.Om(redshift) return self.density_delta @staticmethod def A_NFW(y): r""" Dimensionless volume integral of the NFW profile, used as an intermediate step in some calculations for this model. Notes ----- Model formula: .. math:: A_{NFW} = [\ln(1+y) - \frac{y}{1+y}] """ return np.log(1.0 + y) - (y / (1.0 + y)) def _density_s(self, mass, concentration): """ Calculate scale density of the NFW profile. """ # Enforce default units if not isinstance(mass, u.Quantity): in_mass = u.Quantity(mass, u.M_sun) else: in_mass = mass # Calculate scale density # M_{200} = 4\pi \rho_{s} R_{s}^3 \left[\ln(1+c) - \frac{c}{1+c}\right]. self.density_s = in_mass / (4.0 * np.pi * self._radius_s(in_mass, concentration) ** 3 * self.A_NFW(concentration)) return self.density_s @property def rho_scale(self): r""" Scale density of the NFW profile. Often written in the literature as :math:`\rho_s` """ return self.density_s def _radius_s(self, mass, concentration): """ Calculate scale radius of the NFW profile. """ # Enforce default units if not isinstance(mass, u.Quantity): in_mass = u.Quantity(mass, u.M_sun) else: in_mass = mass # Delta Mass is related to delta radius by # M_{200}=\frac{4}{3}\pi r_{200}^3 200 \rho_{c} # And delta radius is related to the NFW scale radius by # c = R / r_{\\rm s} self.radius_s = (((3.0 * in_mass) / (4.0 * np.pi * self.density_delta)) ** ( 1.0 / 3.0)) / concentration # Set radial units to kiloparsec by default (unit will be rescaled by units of radius # in evaluate) return self.radius_s.to(u.kpc) @property def r_s(self): """ Scale radius of the NFW profile. """ return self.radius_s @property def r_virial(self): """ Mass factor defined virial radius of the NFW profile (R200c for M200c, Rvir for Mvir, etc.). 
""" return self.r_s * self.concentration @property def r_max(self): """ Radius of maximum circular velocity. """ return self.r_s * 2.16258 @property def v_max(self): """ Maximum circular velocity. """ return self.circular_velocity(self.r_max) def circular_velocity(self, r): r""" Circular velocities of the NFW profile. Parameters ---------- r : float or `~astropy.units.Quantity` ['length'] Radial position of velocity to be calculated for the NFW profile. Returns ------- velocity : float or `~astropy.units.Quantity` ['speed'] NFW profile circular velocity at location ``r``. The velocity units are: [km / s] Notes ----- Model formula: .. math:: v_{circ}(r)^2 = \frac{1}{x}\frac{\ln(1+cx)-(cx)/(1+cx)}{\ln(1+c)-c/(1+c)} .. math:: x = r/r_s .. warning:: Output values might contain ``nan`` and ``inf``. """ # Enforce default units (if parameters are without units) if hasattr(r, "unit"): in_r = r else: in_r = u.Quantity(r, u.kpc) # Mass factor defined velocity (i.e. V200c for M200c, Rvir for Mvir) v_profile = np.sqrt(self.mass * const.G.to(in_r.unit**3 / (self.mass.unit * u.s**2)) / self.r_virial) # Define reduced radius (r / r_{\\rm s}) reduced_radius = in_r / self.r_virial.to(in_r.unit) # Circular velocity given by: # v^2=\frac{1}{x}\frac{\ln(1+cx)-(cx)/(1+cx)}{\ln(1+c)-c/(1+c)} # where x=r/r_{200} velocity = np.sqrt((v_profile**2 * self.A_NFW(self.concentration * reduced_radius)) / (reduced_radius * self.A_NFW(self.concentration))) return velocity.to(u.km / u.s) @property def input_units(self): # The units for the 'r' variable should be a length (default kpc) return {self.inputs[0]: u.kpc} @property def return_units(self): # The units for the 'density' variable should be a matter density (default M_sun / kpc^3) if (self.mass.unit is None): return {self.outputs[0]: u.M_sun / self.input_units[self.inputs[0]] ** 3} else: return {self.outputs[0]: self.mass.unit / self.input_units[self.inputs[0]] ** 3} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'mass': u.M_sun, "concentration": None, "redshift": None}
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Statistic functions used in `~astropy.modeling.fitting`. """ # pylint: disable=invalid-name import numpy as np __all__ = ["leastsquare", "leastsquare_1d", "leastsquare_2d", "leastsquare_3d"] def leastsquare(measured_vals, updated_model, weights, *x): """Least square statistic, with optional weights, in N-dimensions. Parameters ---------- measured_vals : ndarray or sequence Measured data values. Will be cast to array whose shape must match the array-cast of the evaluated model. updated_model : :class:`~astropy.modeling.Model` instance Model with parameters set by the current iteration of the optimizer. when evaluated on "x", must return array of shape "measured_vals" weights : ndarray or None Array of weights to apply to each residual. *x : ndarray Independent variables on which to evaluate the model. Returns ------- res : float The sum of least squares. See Also -------- :func:`~astropy.modeling.statistic.leastsquare_1d` :func:`~astropy.modeling.statistic.leastsquare_2d` :func:`~astropy.modeling.statistic.leastsquare_3d` Notes ----- Models in :mod:`~astropy.modeling` have broadcasting rules that try to match inputs with outputs with Model shapes. Numpy arrays have flexible broadcasting rules, so mismatched shapes can often be made compatible. To ensure data matches the model we must perform shape comparison and leverage the Numpy arithmetic functions. This can obfuscate arithmetic computation overrides, like with Quantities. Implement a custom statistic for more direct control. """ model_vals = updated_model(*x) if np.shape(model_vals) != np.shape(measured_vals): raise ValueError(f"Shape mismatch between model ({np.shape(model_vals)}) " f"and measured ({np.shape(measured_vals)})") if weights is None: weights = 1.0 return np.sum(np.square(weights * np.subtract(model_vals, measured_vals))) # ------------------------------------------------------------------- def leastsquare_1d(measured_vals, updated_model, weights, x): """ Least square statistic with optional weights. Safer than the general :func:`~astropy.modeling.statistic.leastsquare` for 1D models by avoiding numpy methods that support broadcasting. Parameters ---------- measured_vals : ndarray Measured data values. updated_model : `~astropy.modeling.Model` Model with parameters set by the current iteration of the optimizer. weights : ndarray or None Array of weights to apply to each residual. x : ndarray Independent variable "x" on which to evaluate the model. Returns ------- res : float The sum of least squares. See Also -------- :func:`~astropy.modeling.statistic.leastsquare` """ model_vals = updated_model(x) if weights is None: return np.sum((model_vals - measured_vals) ** 2) return np.sum((weights * (model_vals - measured_vals)) ** 2) def leastsquare_2d(measured_vals, updated_model, weights, x, y): """ Least square statistic with optional weights. Safer than the general :func:`~astropy.modeling.statistic.leastsquare` for 2D models by avoiding numpy methods that support broadcasting. Parameters ---------- measured_vals : ndarray Measured data values. updated_model : `~astropy.modeling.Model` Model with parameters set by the current iteration of the optimizer. weights : ndarray or None Array of weights to apply to each residual. x : ndarray Independent variable "x" on which to evaluate the model. y : ndarray Independent variable "y" on which to evaluate the model. Returns ------- res : float The sum of least squares. 
See Also -------- :func:`~astropy.modeling.statistic.leastsquare` """ model_vals = updated_model(x, y) if weights is None: return np.sum((model_vals - measured_vals) ** 2) return np.sum((weights * (model_vals - measured_vals)) ** 2) def leastsquare_3d(measured_vals, updated_model, weights, x, y, z): """ Least square statistic with optional weights. Safer than the general :func:`~astropy.modeling.statistic.leastsquare` for 3D models by avoiding numpy methods that support broadcasting. Parameters ---------- measured_vals : ndarray Measured data values. updated_model : `~astropy.modeling.Model` Model with parameters set by the current iteration of the optimizer. weights : ndarray or None Array of weights to apply to each residual. x : ndarray Independent variable "x" on which to evaluate the model. y : ndarray Independent variable "y" on which to evaluate the model. z : ndarray Independent variable "z" on which to evaluate the model. Returns ------- res : float The sum of least squares. See Also -------- :func:`~astropy.modeling.statistic.leastsquare` """ model_vals = updated_model(x, y, z) if weights is None: return np.sum((model_vals - measured_vals) ** 2) return np.sum((weights * (model_vals - measured_vals)) ** 2)
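# Illustrative usage sketch (an assumption, not part of astropy): a small,
# never-called helper computing the weighted least-squares statistic for a
# simple 1D model against invented data.
def _example_leastsquare_usage():  # pragma: no cover
    from astropy.modeling.models import Linear1D
    x = np.array([0.0, 1.0, 2.0, 3.0])
    measured = np.array([0.1, 1.9, 4.1, 5.9])
    weights = np.ones_like(measured)
    model = Linear1D(slope=2.0, intercept=0.0)
    # For 1D data this matches leastsquare_1d, which skips the shape check
    # and broadcasting machinery of the general N-dimensional version.
    return leastsquare(measured, model, weights, x)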
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Power law model variants """ # pylint: disable=invalid-name import numpy as np from astropy.units import Quantity from .core import Fittable1DModel from .parameters import InputParameterError, Parameter __all__ = ['PowerLaw1D', 'BrokenPowerLaw1D', 'SmoothlyBrokenPowerLaw1D', 'ExponentialCutoffPowerLaw1D', 'LogParabola1D', 'Schechter1D'] class PowerLaw1D(Fittable1DModel): """ One dimensional power law model. Parameters ---------- amplitude : float Model amplitude at the reference point x_0 : float Reference point alpha : float Power law index See Also -------- BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D, LogParabola1D Notes ----- Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha` for ``alpha``): .. math:: f(x) = A (x / x_0) ^ {-\\alpha} """ amplitude = Parameter(default=1, description="Peak value at the reference point") x_0 = Parameter(default=1, description="Reference point") alpha = Parameter(default=1, description="Power law index") @staticmethod def evaluate(x, amplitude, x_0, alpha): """One dimensional power law model function""" xx = x / x_0 return amplitude * xx ** (-alpha) @staticmethod def fit_deriv(x, amplitude, x_0, alpha): """One dimensional power law derivative with respect to parameters""" xx = x / x_0 d_amplitude = xx ** (-alpha) d_x_0 = amplitude * alpha * d_amplitude / x_0 d_alpha = -amplitude * d_amplitude * np.log(xx) return [d_amplitude, d_x_0, d_alpha] @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'x_0': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class BrokenPowerLaw1D(Fittable1DModel): """ One dimensional power law model with a break. Parameters ---------- amplitude : float Model amplitude at the break point. x_break : float Break point. alpha_1 : float Power law index for x < x_break. alpha_2 : float Power law index for x > x_break. See Also -------- PowerLaw1D, ExponentialCutoffPowerLaw1D, LogParabola1D Notes ----- Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha_1` for ``alpha_1`` and :math:`\\alpha_2` for ``alpha_2``): .. math:: f(x) = \\left \\{ \\begin{array}{ll} A (x / x_{break}) ^ {-\\alpha_1} & : x < x_{break} \\\\ A (x / x_{break}) ^ {-\\alpha_2} & : x > x_{break} \\\\ \\end{array} \\right. 
""" amplitude = Parameter(default=1, description="Peak value at break point") x_break = Parameter(default=1, description="Break point") alpha_1 = Parameter(default=1, description="Power law index before break point") alpha_2 = Parameter(default=1, description="Power law index after break point") @staticmethod def evaluate(x, amplitude, x_break, alpha_1, alpha_2): """One dimensional broken power law model function""" alpha = np.where(x < x_break, alpha_1, alpha_2) xx = x / x_break return amplitude * xx ** (-alpha) @staticmethod def fit_deriv(x, amplitude, x_break, alpha_1, alpha_2): """One dimensional broken power law derivative with respect to parameters""" alpha = np.where(x < x_break, alpha_1, alpha_2) xx = x / x_break d_amplitude = xx ** (-alpha) d_x_break = amplitude * alpha * d_amplitude / x_break d_alpha = -amplitude * d_amplitude * np.log(xx) d_alpha_1 = np.where(x < x_break, d_alpha, 0) d_alpha_2 = np.where(x >= x_break, d_alpha, 0) return [d_amplitude, d_x_break, d_alpha_1, d_alpha_2] @property def input_units(self): if self.x_break.unit is None: return None return {self.inputs[0]: self.x_break.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'x_break': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class SmoothlyBrokenPowerLaw1D(Fittable1DModel): """One dimensional smoothly broken power law model. Parameters ---------- amplitude : float Model amplitude at the break point. x_break : float Break point. alpha_1 : float Power law index for ``x << x_break``. alpha_2 : float Power law index for ``x >> x_break``. delta : float Smoothness parameter. See Also -------- BrokenPowerLaw1D Notes ----- Model formula (with :math:`A` for ``amplitude``, :math:`x_b` for ``x_break``, :math:`\\alpha_1` for ``alpha_1``, :math:`\\alpha_2` for ``alpha_2`` and :math:`\\Delta` for ``delta``): .. math:: f(x) = A \\left( \\frac{x}{x_b} \\right) ^ {-\\alpha_1} \\left\\{ \\frac{1}{2} \\left[ 1 + \\left( \\frac{x}{x_b}\\right)^{1 / \\Delta} \\right] \\right\\}^{(\\alpha_1 - \\alpha_2) \\Delta} The change of slope occurs between the values :math:`x_1` and :math:`x_2` such that: .. math:: \\log_{10} \\frac{x_2}{x_b} = \\log_{10} \\frac{x_b}{x_1} \\sim \\Delta At values :math:`x \\lesssim x_1` and :math:`x \\gtrsim x_2` the model is approximately a simple power law with index :math:`\\alpha_1` and :math:`\\alpha_2` respectively. The two power laws are smoothly joined at values :math:`x_1 < x < x_2`, hence the :math:`\\Delta` parameter sets the "smoothness" of the slope change. The ``delta`` parameter is bounded to values greater than 1e-3 (corresponding to :math:`x_2 / x_1 \\gtrsim 1.002`) to avoid overflow errors. The ``amplitude`` parameter is bounded to positive values since this model is typically used to represent positive quantities. Examples -------- .. 
plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling import models x = np.logspace(0.7, 2.3, 500) f = models.SmoothlyBrokenPowerLaw1D(amplitude=1, x_break=20, alpha_1=-2, alpha_2=2) plt.figure() plt.title("amplitude=1, x_break=20, alpha_1=-2, alpha_2=2") f.delta = 0.5 plt.loglog(x, f(x), '--', label='delta=0.5') f.delta = 0.3 plt.loglog(x, f(x), '-.', label='delta=0.3') f.delta = 0.1 plt.loglog(x, f(x), label='delta=0.1') plt.axis([x.min(), x.max(), 0.1, 1.1]) plt.legend(loc='lower center') plt.grid(True) plt.show() """ amplitude = Parameter(default=1, min=0, description="Peak value at break point") x_break = Parameter(default=1, description="Break point") alpha_1 = Parameter(default=-2, description="Power law index before break point") alpha_2 = Parameter(default=2, description="Power law index after break point") delta = Parameter(default=1, min=1.e-3, description="Smoothness Parameter") @amplitude.validator def amplitude(self, value): if np.any(value <= 0): raise InputParameterError( "amplitude parameter must be > 0") @delta.validator def delta(self, value): if np.any(value < 0.001): raise InputParameterError( "delta parameter must be >= 0.001") @staticmethod def evaluate(x, amplitude, x_break, alpha_1, alpha_2, delta): """One dimensional smoothly broken power law model function""" # Pre-calculate `x/x_b` xx = x / x_break # Initialize the return value f = np.zeros_like(xx, subok=False) if isinstance(amplitude, Quantity): return_unit = amplitude.unit amplitude = amplitude.value else: return_unit = None # The quantity `t = (x / x_b)^(1 / delta)` can become quite # large. To avoid overflow errors we will start by calculating # its natural logarithm: logt = np.log(xx) / delta # When `t >> 1` or `t << 1` we don't actually need to compute # the `t` value since the main formula (see docstring) can be # significantly simplified by neglecting `1` or `t` # respectively. In the following we will check whether `t` is # much greater, much smaller, or comparable to 1 by comparing # the `logt` value with an appropriate threshold. threshold = 30 # corresponding to exp(30) ~ 1e13 i = logt > threshold if i.max(): # In this case the main formula reduces to a simple power # law with index `alpha_2`. f[i] = amplitude * xx[i] ** (-alpha_2) / (2. ** ((alpha_1 - alpha_2) * delta)) i = logt < -threshold if i.max(): # In this case the main formula reduces to a simple power # law with index `alpha_1`. f[i] = amplitude * xx[i] ** (-alpha_1) / (2. ** ((alpha_1 - alpha_2) * delta)) i = np.abs(logt) <= threshold if i.max(): # In this case the `t` value is "comparable" to 1, hence we # we will evaluate the whole formula. t = np.exp(logt[i]) r = (1. + t) / 2. f[i] = amplitude * xx[i] ** (-alpha_1) * r ** ((alpha_1 - alpha_2) * delta) if return_unit: return Quantity(f, unit=return_unit, copy=False) return f @staticmethod def fit_deriv(x, amplitude, x_break, alpha_1, alpha_2, delta): """One dimensional smoothly broken power law derivative with respect to parameters""" # Pre-calculate `x_b` and `x/x_b` and `logt` (see comments in # SmoothlyBrokenPowerLaw1D.evaluate) xx = x / x_break logt = np.log(xx) / delta # Initialize the return values f = np.zeros_like(xx) d_amplitude = np.zeros_like(xx) d_x_break = np.zeros_like(xx) d_alpha_1 = np.zeros_like(xx) d_alpha_2 = np.zeros_like(xx) d_delta = np.zeros_like(xx) threshold = 30 # (see comments in SmoothlyBrokenPowerLaw1D.evaluate) i = logt > threshold if i.max(): f[i] = amplitude * xx[i] ** (-alpha_2) \ / (2. 
** ((alpha_1 - alpha_2) * delta)) d_amplitude[i] = f[i] / amplitude d_x_break[i] = f[i] * alpha_2 / x_break d_alpha_1[i] = f[i] * (-delta * np.log(2)) d_alpha_2[i] = f[i] * (-np.log(xx[i]) + delta * np.log(2)) d_delta[i] = f[i] * (-(alpha_1 - alpha_2) * np.log(2)) i = logt < -threshold if i.max(): f[i] = amplitude * xx[i] ** (-alpha_1) \ / (2. ** ((alpha_1 - alpha_2) * delta)) d_amplitude[i] = f[i] / amplitude d_x_break[i] = f[i] * alpha_1 / x_break d_alpha_1[i] = f[i] * (-np.log(xx[i]) - delta * np.log(2)) d_alpha_2[i] = f[i] * delta * np.log(2) d_delta[i] = f[i] * (-(alpha_1 - alpha_2) * np.log(2)) i = np.abs(logt) <= threshold if i.max(): t = np.exp(logt[i]) r = (1. + t) / 2. f[i] = amplitude * xx[i] ** (-alpha_1) \ * r ** ((alpha_1 - alpha_2) * delta) d_amplitude[i] = f[i] / amplitude d_x_break[i] = f[i] * (alpha_1 - (alpha_1 - alpha_2) * t / 2. / r) / x_break d_alpha_1[i] = f[i] * (-np.log(xx[i]) + delta * np.log(r)) d_alpha_2[i] = f[i] * (-delta * np.log(r)) d_delta[i] = f[i] * (alpha_1 - alpha_2) \ * (np.log(r) - t / (1. + t) / delta * np.log(xx[i])) return [d_amplitude, d_x_break, d_alpha_1, d_alpha_2, d_delta] @property def input_units(self): if self.x_break.unit is None: return None return {self.inputs[0]: self.x_break.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'x_break': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class ExponentialCutoffPowerLaw1D(Fittable1DModel): """ One dimensional power law model with an exponential cutoff. Parameters ---------- amplitude : float Model amplitude x_0 : float Reference point alpha : float Power law index x_cutoff : float Cutoff point See Also -------- PowerLaw1D, BrokenPowerLaw1D, LogParabola1D Notes ----- Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha` for ``alpha``): .. math:: f(x) = A (x / x_0) ^ {-\\alpha} \\exp (-x / x_{cutoff}) """ amplitude = Parameter(default=1, description="Peak value of model") x_0 = Parameter(default=1, description="Reference point") alpha = Parameter(default=1, description="Power law index") x_cutoff = Parameter(default=1, description="Cutoff point") @staticmethod def evaluate(x, amplitude, x_0, alpha, x_cutoff): """One dimensional exponential cutoff power law model function""" xx = x / x_0 return amplitude * xx ** (-alpha) * np.exp(-x / x_cutoff) @staticmethod def fit_deriv(x, amplitude, x_0, alpha, x_cutoff): """One dimensional exponential cutoff power law derivative with respect to parameters""" xx = x / x_0 xc = x / x_cutoff d_amplitude = xx ** (-alpha) * np.exp(-xc) d_x_0 = alpha * amplitude * d_amplitude / x_0 d_alpha = -amplitude * d_amplitude * np.log(xx) d_x_cutoff = amplitude * x * d_amplitude / x_cutoff ** 2 return [d_amplitude, d_x_0, d_alpha, d_x_cutoff] @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'x_0': inputs_unit[self.inputs[0]], 'x_cutoff': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class LogParabola1D(Fittable1DModel): """ One dimensional log parabola model (sometimes called curved power law). 
Parameters ---------- amplitude : float Model amplitude x_0 : float Reference point alpha : float Power law index beta : float Power law curvature See Also -------- PowerLaw1D, BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D Notes ----- Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha` for ``alpha`` and :math:`\\beta` for ``beta``): .. math:: f(x) = A \\left( \\frac{x}{x_{0}}\\right)^{- \\alpha - \\beta \\log{\\left (\\frac{x}{x_{0}} \\right )}} """ amplitude = Parameter(default=1, description="Peak value of model") x_0 = Parameter(default=1, description="Reference point") alpha = Parameter(default=1, description="Power law index") beta = Parameter(default=0, description="Power law curvature") @staticmethod def evaluate(x, amplitude, x_0, alpha, beta): """One dimensional log parabola model function""" xx = x / x_0 exponent = -alpha - beta * np.log(xx) return amplitude * xx ** exponent @staticmethod def fit_deriv(x, amplitude, x_0, alpha, beta): """One dimensional log parabola derivative with respect to parameters""" xx = x / x_0 log_xx = np.log(xx) exponent = -alpha - beta * log_xx d_amplitude = xx ** exponent d_beta = -amplitude * d_amplitude * log_xx ** 2 d_x_0 = amplitude * d_amplitude * (beta * log_xx / x_0 - exponent / x_0) d_alpha = -amplitude * d_amplitude * log_xx return [d_amplitude, d_x_0, d_alpha, d_beta] @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'x_0': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class Schechter1D(Fittable1DModel): r""" Schechter luminosity function (`Schechter 1976 <https://ui.adsabs.harvard.edu/abs/1976ApJ...203..297S/abstract>`_), parameterized in terms of magnitudes. Parameters ---------- phi_star : float The normalization factor in units of number density. m_star : float The characteristic magnitude where the power-law form of the function cuts off. Must not have units. alpha : float The power law index, also known as the faint-end slope. Must not have units. See Also -------- PowerLaw1D, ExponentialCutoffPowerLaw1D, BrokenPowerLaw1D Notes ----- Model formula (with :math:`\phi^{*}` for ``phi_star``, :math:`M^{*}` for ``m_star``, and :math:`\alpha` for ``alpha``): .. math:: n(M) \ dM = (0.4 \ln 10) \ \phi^{*} \ [{10^{0.4 (M^{*} - M)}}]^{\alpha + 1} \ \exp{[-10^{0.4 (M^{*} - M)}]} \ dM ``phi_star`` is the normalization factor in units of number density. ``m_star`` is the characteristic magnitude where the power-law form of the function cuts off into the exponential form. ``alpha`` is the power-law index, defining the faint-end slope of the luminosity function. Examples -------- .. plot:: :include-source: from astropy.modeling.models import Schechter1D import astropy.units as u import matplotlib.pyplot as plt import numpy as np phi_star = 4.3e-4 * (u.Mpc ** -3) m_star = -20.26 alpha = -1.98 model = Schechter1D(phi_star, m_star, alpha) mag = np.linspace(-25, -17) fig, ax = plt.subplots() ax.plot(mag, model(mag)) ax.set_yscale('log') ax.set_xlim(-22.6, -17) ax.set_ylim(1.e-7, 1.e-2) ax.set_xlabel('$M_{UV}$') ax.set_ylabel('$\phi$ [mag$^{-1}$ Mpc$^{-3}]$') References ---------- .. [1] Schechter 1976; ApJ 203, 297 (https://ui.adsabs.harvard.edu/abs/1976ApJ...203..297S/abstract) .. 
[2] `Luminosity function <https://en.wikipedia.org/wiki/Luminosity_function_(astronomy)>`_ """ phi_star = Parameter(default=1., description=('Normalization factor ' 'in units of number density')) m_star = Parameter(default=-20., description='Characteristic magnitude') alpha = Parameter(default=-1., description='Faint-end slope') @staticmethod def evaluate(mag, phi_star, m_star, alpha): """Schechter luminosity function model function.""" if isinstance(mag, Quantity) or isinstance(m_star, Quantity): raise ValueError('mag and m_star must not have units') factor = 10 ** (0.4 * (m_star - mag)) return (0.4 * np.log(10) * phi_star * factor**(alpha + 1) * np.exp(-factor)) @staticmethod def fit_deriv(mag, phi_star, m_star, alpha): """ Schechter luminosity function derivative with respect to parameters. """ if isinstance(mag, Quantity) or isinstance(m_star, Quantity): raise ValueError('mag and m_star must not have units') factor = 10 ** (0.4 * (m_star - mag)) d_phi_star = 0.4 * np.log(10) * factor**(alpha + 1) * np.exp(-factor) func = phi_star * d_phi_star d_m_star = ((alpha + 1) * 0.4 * np.log(10) * func - (0.4 * np.log(10) * func * factor)) d_alpha = func * np.log(factor) return [d_phi_star, d_m_star, d_alpha] @property def input_units(self): if self.m_star.unit is None: return None return {self.inputs[0]: self.m_star.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'m_star': inputs_unit[self.inputs[0]], 'phi_star': outputs_unit[self.outputs[0]]}
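
# --- Illustrative usage (added for exposition; not part of the original
# module). A minimal sketch of how the analytic ``fit_deriv`` methods
# defined above get exercised: LevMarLSQFitter uses a model's ``fit_deriv``
# for the Jacobian unless ``estimate_jacobian=True`` is passed. The
# parameter values below are hypothetical. Assuming this file lives at
# ``astropy/modeling/powerlaws.py``, it can be run with
# ``python -m astropy.modeling.powerlaws``.
if __name__ == '__main__':
    from astropy.modeling.fitting import LevMarLSQFitter

    x = np.logspace(-1, 2, 50)
    truth = LogParabola1D(amplitude=10., x_0=1., alpha=2., beta=0.5)
    y = truth(x)

    # ``x_0`` is degenerate with the other parameters (a rescaling of x_0
    # can be absorbed by amplitude, alpha and beta), so hold it fixed.
    init = LogParabola1D(amplitude=5., x_0=1., alpha=1., beta=0.1)
    init.x_0.fixed = True

    fitted = LevMarLSQFitter()(init, x, y)
    # Should recover approximately amplitude=10, alpha=2, beta=0.5.
    print(fitted.amplitude.value, fitted.alpha.value, fitted.beta.value)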
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """Sundry function and class decorators.""" import functools import inspect import textwrap import threading import types import warnings from inspect import signature from .exceptions import (AstropyDeprecationWarning, AstropyUserWarning, AstropyPendingDeprecationWarning) __all__ = ['classproperty', 'deprecated', 'deprecated_attribute', 'deprecated_renamed_argument', 'format_doc', 'lazyproperty', 'sharedmethod'] _NotFound = object() def deprecated(since, message='', name='', alternative='', pending=False, obj_type=None, warning_type=AstropyDeprecationWarning): """ Used to mark a function or class as deprecated. To mark an attribute as deprecated, use `deprecated_attribute`. Parameters ---------- since : str The release at which this API became deprecated. This is required. message : str, optional Override the default deprecation message. The format specifier ``func`` may be used for the name of the function, and ``alternative`` may be used in the deprecation message to insert the name of an alternative to the deprecated function. ``obj_type`` may be used to insert a friendly name for the type of object being deprecated. name : str, optional The name of the deprecated function or class; if not provided the name is automatically determined from the passed in function or class, though this is useful in the case of renamed functions, where the new function is just assigned to the name of the deprecated function. For example:: def new_function(): ... oldFunction = new_function alternative : str, optional An alternative function or class name that the user may use in place of the deprecated object. The deprecation warning will tell the user about this alternative if provided. pending : bool, optional If True, uses a AstropyPendingDeprecationWarning instead of a ``warning_type``. obj_type : str, optional The type of this object, if the automatically determined one needs to be overridden. warning_type : Warning Warning to be issued. Default is `~astropy.utils.exceptions.AstropyDeprecationWarning`. """ method_types = (classmethod, staticmethod, types.MethodType) def deprecate_doc(old_doc, message): """ Returns a given docstring with a deprecation message prepended to it. """ if not old_doc: old_doc = '' old_doc = textwrap.dedent(old_doc).strip('\n') new_doc = (('\n.. deprecated:: {since}' '\n {message}\n\n'.format( **{'since': since, 'message': message.strip()})) + old_doc) if not old_doc: # This is to prevent a spurious 'unexpected unindent' warning from # docutils when the original docstring was blank. new_doc += r'\ ' return new_doc def get_function(func): """ Given a function or classmethod (or other function wrapper type), get the function object. """ if isinstance(func, method_types): func = func.__func__ return func def deprecate_function(func, message, warning_type=warning_type): """ Returns a wrapped function that displays ``warning_type`` when it is called. """ if isinstance(func, method_types): func_wrapper = type(func) else: func_wrapper = lambda f: f # noqa: E731 func = get_function(func) def deprecated_func(*args, **kwargs): if pending: category = AstropyPendingDeprecationWarning else: category = warning_type warnings.warn(message, category, stacklevel=2) return func(*args, **kwargs) # If this is an extension function, we can't call # functools.wraps on it, but we normally don't care. # This crazy way to get the type of a wrapper descriptor is # straight out of the Python 3.3 inspect module docs. 
if type(func) is not type(str.__dict__['__add__']): # noqa: E721 deprecated_func = functools.wraps(func)(deprecated_func) deprecated_func.__doc__ = deprecate_doc( deprecated_func.__doc__, message) return func_wrapper(deprecated_func) def deprecate_class(cls, message, warning_type=warning_type): """ Update the docstring and wrap the ``__init__`` in-place (or ``__new__`` if the class or any of the bases overrides ``__new__``) so it will give a deprecation warning when an instance is created. This won't work for extension classes because these can't be modified in-place and the alternatives don't work in the general case: - Using a new class that looks and behaves like the original doesn't work because the __new__ method of extension types usually makes sure that it's the same class or a subclass. - Subclassing the class and return the subclass can lead to problems with pickle and will look weird in the Sphinx docs. """ cls.__doc__ = deprecate_doc(cls.__doc__, message) if cls.__new__ is object.__new__: cls.__init__ = deprecate_function(get_function(cls.__init__), message, warning_type) else: cls.__new__ = deprecate_function(get_function(cls.__new__), message, warning_type) return cls def deprecate(obj, message=message, name=name, alternative=alternative, pending=pending, warning_type=warning_type): if obj_type is None: if isinstance(obj, type): obj_type_name = 'class' elif inspect.isfunction(obj): obj_type_name = 'function' elif inspect.ismethod(obj) or isinstance(obj, method_types): obj_type_name = 'method' else: obj_type_name = 'object' else: obj_type_name = obj_type if not name: name = get_function(obj).__name__ altmessage = '' if not message or type(message) is type(deprecate): if pending: message = ('The {func} {obj_type} will be deprecated in a ' 'future version.') else: message = ('The {func} {obj_type} is deprecated and may ' 'be removed in a future version.') if alternative: altmessage = f'\n Use {alternative} instead.' message = ((message.format(**{ 'func': name, 'name': name, 'alternative': alternative, 'obj_type': obj_type_name})) + altmessage) if isinstance(obj, type): return deprecate_class(obj, message, warning_type) else: return deprecate_function(obj, message, warning_type) if type(message) is type(deprecate): return deprecate(message) return deprecate def deprecated_attribute(name, since, message=None, alternative=None, pending=False, warning_type=AstropyDeprecationWarning): """ Used to mark a public attribute as deprecated. This creates a property that will warn when the given attribute name is accessed. To prevent the warning (i.e. for internal code), use the private name for the attribute by prepending an underscore (i.e. ``self._name``). Parameters ---------- name : str The name of the deprecated attribute. since : str The release at which this API became deprecated. This is required. message : str, optional Override the default deprecation message. The format specifier ``name`` may be used for the name of the attribute, and ``alternative`` may be used in the deprecation message to insert the name of an alternative to the deprecated function. alternative : str, optional An alternative attribute that the user may use in place of the deprecated attribute. The deprecation warning will tell the user about this alternative if provided. pending : bool, optional If True, uses a AstropyPendingDeprecationWarning instead of ``warning_type``. warning_type : Warning Warning to be issued. Default is `~astropy.utils.exceptions.AstropyDeprecationWarning`. 
Examples -------- :: class MyClass: # Mark the old_name as deprecated old_name = misc.deprecated_attribute('old_name', '0.1') def method(self): self._old_name = 42 """ private_name = '_' + name specific_deprecated = deprecated(since, name=name, obj_type='attribute', message=message, alternative=alternative, pending=pending, warning_type=warning_type) @specific_deprecated def get(self): return getattr(self, private_name) @specific_deprecated def set(self, val): setattr(self, private_name, val) @specific_deprecated def delete(self): delattr(self, private_name) return property(get, set, delete) def deprecated_renamed_argument(old_name, new_name, since, arg_in_kwargs=False, relax=False, pending=False, warning_type=AstropyDeprecationWarning, alternative='', message=''): """Deprecate a _renamed_ or _removed_ function argument. The decorator assumes that the argument with the ``old_name`` was removed from the function signature and the ``new_name`` replaced it at the **same position** in the signature. If the ``old_name`` argument is given when calling the decorated function the decorator will catch it and issue a deprecation warning and pass it on as ``new_name`` argument. Parameters ---------- old_name : str or sequence of str The old name of the argument. new_name : str or sequence of str or None The new name of the argument. Set this to `None` to remove the argument ``old_name`` instead of renaming it. since : str or number or sequence of str or number The release at which the old argument became deprecated. arg_in_kwargs : bool or sequence of bool, optional If the argument is not a named argument (for example it was meant to be consumed by ``**kwargs``) set this to ``True``. Otherwise the decorator will throw an Exception if the ``new_name`` cannot be found in the signature of the decorated function. Default is ``False``. relax : bool or sequence of bool, optional If ``False`` a ``TypeError`` is raised if both ``new_name`` and ``old_name`` are given. If ``True`` the value for ``new_name`` is used and a Warning is issued. Default is ``False``. pending : bool or sequence of bool, optional If ``True`` this will hide the deprecation warning and ignore the corresponding ``relax`` parameter value. Default is ``False``. warning_type : Warning Warning to be issued. Default is `~astropy.utils.exceptions.AstropyDeprecationWarning`. alternative : str, optional An alternative function or class name that the user may use in place of the deprecated object if ``new_name`` is None. The deprecation warning will tell the user about this alternative if provided. message : str, optional A custom warning message. If provided then ``since`` and ``alternative`` options will have no effect. Raises ------ TypeError If the new argument name cannot be found in the function signature and arg_in_kwargs was False or if it is used to deprecate the name of the ``*args``-, ``**kwargs``-like arguments. At runtime such an Error is raised if both the new_name and old_name were specified when calling the function and "relax=False". Notes ----- The decorator should be applied to a function where the **name** of an argument was changed but it applies the same logic. .. warning:: If ``old_name`` is a list or tuple the ``new_name`` and ``since`` must also be a list or tuple with the same number of entries. ``relax`` and ``arg_in_kwarg`` can be a single bool (applied to all) or also a list/tuple with the same number of entries like ``new_name``, etc. Examples -------- The deprecation warnings are not shown in the following examples. 
To deprecate a positional or keyword argument:: >>> from astropy.utils.decorators import deprecated_renamed_argument >>> @deprecated_renamed_argument('sig', 'sigma', '1.0') ... def test(sigma): ... return sigma >>> test(2) 2 >>> test(sigma=2) 2 >>> test(sig=2) # doctest: +SKIP 2 To deprecate an argument caught inside the ``**kwargs`` the ``arg_in_kwargs`` has to be set:: >>> @deprecated_renamed_argument('sig', 'sigma', '1.0', ... arg_in_kwargs=True) ... def test(**kwargs): ... return kwargs['sigma'] >>> test(sigma=2) 2 >>> test(sig=2) # doctest: +SKIP 2 By default providing the new and old keyword will lead to an Exception. If a Warning is desired set the ``relax`` argument:: >>> @deprecated_renamed_argument('sig', 'sigma', '1.0', relax=True) ... def test(sigma): ... return sigma >>> test(sig=2) # doctest: +SKIP 2 It is also possible to replace multiple arguments. The ``old_name``, ``new_name`` and ``since`` have to be `tuple` or `list` and contain the same number of entries:: >>> @deprecated_renamed_argument(['a', 'b'], ['alpha', 'beta'], ... ['1.0', 1.2]) ... def test(alpha, beta): ... return alpha, beta >>> test(a=2, b=3) # doctest: +SKIP (2, 3) In this case ``arg_in_kwargs`` and ``relax`` can be a single value (which is applied to all renamed arguments) or must also be a `tuple` or `list` with values for each of the arguments. """ cls_iter = (list, tuple) if isinstance(old_name, cls_iter): n = len(old_name) # Assume that new_name and since are correct (tuple/list with the # appropriate length) in the spirit of the "consenting adults". But the # optional parameters may not be set, so if these are not iterables # wrap them. if not isinstance(arg_in_kwargs, cls_iter): arg_in_kwargs = [arg_in_kwargs] * n if not isinstance(relax, cls_iter): relax = [relax] * n if not isinstance(pending, cls_iter): pending = [pending] * n if not isinstance(message, cls_iter): message = [message] * n else: # To allow a uniform approach later on, wrap all arguments in lists. n = 1 old_name = [old_name] new_name = [new_name] since = [since] arg_in_kwargs = [arg_in_kwargs] relax = [relax] pending = [pending] message = [message] def decorator(function): # The named arguments of the function. arguments = signature(function).parameters keys = list(arguments.keys()) position = [None] * n for i in range(n): # Determine the position of the argument. if arg_in_kwargs[i]: pass else: if new_name[i] is None: param = arguments[old_name[i]] elif new_name[i] in arguments: param = arguments[new_name[i]] # In case the argument is not found in the list of arguments # the only remaining possibility is that it should be caught # by some kind of **kwargs argument. # This case has to be explicitly specified, otherwise throw # an exception! else: raise TypeError( f'"{new_name[i]}" was not specified in the function ' 'signature. If it was meant to be part of ' '"**kwargs" then set "arg_in_kwargs" to "True"') # There are several possibilities now: # 1.) Positional or keyword argument: if param.kind == param.POSITIONAL_OR_KEYWORD: if new_name[i] is None: position[i] = keys.index(old_name[i]) else: position[i] = keys.index(new_name[i]) # 2.) Keyword only argument: elif param.kind == param.KEYWORD_ONLY: # These cannot be specified by position. position[i] = None # 3.) 
positional-only argument, varargs, varkwargs or some # unknown type: else: raise TypeError(f'cannot replace argument "{new_name[i]}" ' f'of kind {repr(param.kind)}.') @functools.wraps(function) def wrapper(*args, **kwargs): for i in range(n): msg = message[i] or (f'"{old_name[i]}" was deprecated in ' f'version {since[i]} and will be removed ' 'in a future version. ') # The only way to have oldkeyword inside the function is # that it is passed as kwarg because the oldkeyword # parameter was renamed to newkeyword. if old_name[i] in kwargs: value = kwargs.pop(old_name[i]) # Display the deprecation warning only when it's not # pending. if not pending[i]: if not message[i]: if new_name[i] is not None: msg += f'Use argument "{new_name[i]}" instead.' elif alternative: msg += f'\n Use {alternative} instead.' warnings.warn(msg, warning_type, stacklevel=2) # Check if the newkeyword was given as well. newarg_in_args = (position[i] is not None and len(args) > position[i]) newarg_in_kwargs = new_name[i] in kwargs if newarg_in_args or newarg_in_kwargs: if not pending[i]: # If both are given print a Warning if relax is # True or raise an Exception is relax is False. if relax[i]: warnings.warn( f'"{old_name[i]}" and "{new_name[i]}" ' 'keywords were set. ' f'Using the value of "{new_name[i]}".', AstropyUserWarning) else: raise TypeError( f'cannot specify both "{old_name[i]}" and ' f'"{new_name[i]}".') else: # Pass the value of the old argument with the # name of the new argument to the function if new_name[i] is not None: kwargs[new_name[i]] = value # If old argument has no replacement, cast it back. # https://github.com/astropy/astropy/issues/9914 else: kwargs[old_name[i]] = value # Deprecated keyword without replacement is given as # positional argument. elif (not pending[i] and not new_name[i] and position[i] and len(args) > position[i]): if alternative and not message[i]: msg += f'\n Use {alternative} instead.' warnings.warn(msg, warning_type, stacklevel=2) return function(*args, **kwargs) return wrapper return decorator # TODO: This can still be made to work for setters by implementing an # accompanying metaclass that supports it; we just don't need that right this # second class classproperty(property): """ Similar to `property`, but allows class-level properties. That is, a property whose getter is like a `classmethod`. The wrapped method may explicitly use the `classmethod` decorator (which must become before this decorator), or the `classmethod` may be omitted (it is implicit through use of this decorator). .. note:: classproperty only works for *read-only* properties. It does not currently allow writeable/deletable properties, due to subtleties of how Python descriptors work. In order to implement such properties on a class a metaclass for that class must be implemented. Parameters ---------- fget : callable The function that computes the value of this property (in particular, the function when this is used as a decorator) a la `property`. doc : str, optional The docstring for the property--by default inherited from the getter function. lazy : bool, optional If True, caches the value returned by the first call to the getter function, so that it is only called once (used for lazy evaluation of an attribute). This is analogous to `lazyproperty`. The ``lazy`` argument can also be used when `classproperty` is used as a decorator (see the third example below). When used in the decorator syntax this *must* be passed in as a keyword argument. Examples -------- :: >>> class Foo: ... _bar_internal = 1 ... 
@classproperty ... def bar(cls): ... return cls._bar_internal + 1 ... >>> Foo.bar 2 >>> foo_instance = Foo() >>> foo_instance.bar 2 >>> foo_instance._bar_internal = 2 >>> foo_instance.bar # Ignores instance attributes 2 As previously noted, a `classproperty` is limited to implementing read-only attributes:: >>> class Foo: ... _bar_internal = 1 ... @classproperty ... def bar(cls): ... return cls._bar_internal ... @bar.setter ... def bar(cls, value): ... cls._bar_internal = value ... Traceback (most recent call last): ... NotImplementedError: classproperty can only be read-only; use a metaclass to implement modifiable class-level properties When the ``lazy`` option is used, the getter is only called once:: >>> class Foo: ... @classproperty(lazy=True) ... def bar(cls): ... print("Performing complicated calculation") ... return 1 ... >>> Foo.bar Performing complicated calculation 1 >>> Foo.bar 1 If a subclass inherits a lazy `classproperty` the property is still re-evaluated for the subclass:: >>> class FooSub(Foo): ... pass ... >>> FooSub.bar Performing complicated calculation 1 >>> FooSub.bar 1 """ def __new__(cls, fget=None, doc=None, lazy=False): if fget is None: # Being used as a decorator--return a wrapper that implements # decorator syntax def wrapper(func): return cls(func, lazy=lazy) return wrapper return super().__new__(cls) def __init__(self, fget, doc=None, lazy=False): self._lazy = lazy if lazy: self._lock = threading.RLock() # Protects _cache self._cache = {} fget = self._wrap_fget(fget) super().__init__(fget=fget, doc=doc) # There is a buglet in Python where self.__doc__ doesn't # get set properly on instances of property subclasses if # the doc argument was used rather than taking the docstring # from fget # Related Python issue: https://bugs.python.org/issue24766 if doc is not None: self.__doc__ = doc def __get__(self, obj, objtype): if self._lazy: val = self._cache.get(objtype, _NotFound) if val is _NotFound: with self._lock: # Check if another thread initialised before we locked. val = self._cache.get(objtype, _NotFound) if val is _NotFound: val = self.fget.__wrapped__(objtype) self._cache[objtype] = val else: # The base property.__get__ will just return self here; # instead we pass objtype through to the original wrapped # function (which takes the class as its sole argument) val = self.fget.__wrapped__(objtype) return val def getter(self, fget): return super().getter(self._wrap_fget(fget)) def setter(self, fset): raise NotImplementedError( "classproperty can only be read-only; use a metaclass to " "implement modifiable class-level properties") def deleter(self, fdel): raise NotImplementedError( "classproperty can only be read-only; use a metaclass to " "implement modifiable class-level properties") @staticmethod def _wrap_fget(orig_fget): if isinstance(orig_fget, classmethod): orig_fget = orig_fget.__func__ # Using stock functools.wraps instead of the fancier version # found later in this module, which is overkill for this purpose @functools.wraps(orig_fget) def fget(obj): return orig_fget(obj.__class__) return fget # Adapted from the recipe at # http://code.activestate.com/recipes/363602-lazy-property-evaluation class lazyproperty(property): """ Works similarly to property(), but computes the value only once. This essentially memorizes the value of the property by storing the result of its computation in the ``__dict__`` of the object instance. This is useful for computing the value of some property that should otherwise be invariant. For example:: >>> class LazyTest: ... 
@lazyproperty ... def complicated_property(self): ... print('Computing the value for complicated_property...') ... return 42 ... >>> lt = LazyTest() >>> lt.complicated_property Computing the value for complicated_property... 42 >>> lt.complicated_property 42 As the example shows, the second time ``complicated_property`` is accessed, the ``print`` statement is not executed. Only the return value from the first access off ``complicated_property`` is returned. By default, a setter and deleter are used which simply overwrite and delete, respectively, the value stored in ``__dict__``. Any user-specified setter or deleter is executed before executing these default actions. The one exception is that the default setter is not run if the user setter already sets the new value in ``__dict__`` and returns that value and the returned value is not ``None``. """ def __init__(self, fget, fset=None, fdel=None, doc=None): super().__init__(fget, fset, fdel, doc) self._key = self.fget.__name__ self._lock = threading.RLock() def __get__(self, obj, owner=None): try: obj_dict = obj.__dict__ val = obj_dict.get(self._key, _NotFound) if val is _NotFound: with self._lock: # Check if another thread beat us to it. val = obj_dict.get(self._key, _NotFound) if val is _NotFound: val = self.fget(obj) obj_dict[self._key] = val return val except AttributeError: if obj is None: return self raise def __set__(self, obj, val): obj_dict = obj.__dict__ if self.fset: ret = self.fset(obj, val) if ret is not None and obj_dict.get(self._key) is ret: # By returning the value set the setter signals that it # took over setting the value in obj.__dict__; this # mechanism allows it to override the input value return obj_dict[self._key] = val def __delete__(self, obj): if self.fdel: self.fdel(obj) obj.__dict__.pop(self._key, None) # Delete if present class sharedmethod(classmethod): """ This is a method decorator that allows both an instancemethod and a `classmethod` to share the same name. When using `sharedmethod` on a method defined in a class's body, it may be called on an instance, or on a class. In the former case it behaves like a normal instance method (a reference to the instance is automatically passed as the first ``self`` argument of the method):: >>> class Example: ... @sharedmethod ... def identify(self, *args): ... print('self was', self) ... print('additional args were', args) ... >>> ex = Example() >>> ex.identify(1, 2) self was <astropy.utils.decorators.Example object at 0x...> additional args were (1, 2) In the latter case, when the `sharedmethod` is called directly from a class, it behaves like a `classmethod`:: >>> Example.identify(3, 4) self was <class 'astropy.utils.decorators.Example'> additional args were (3, 4) This also supports a more advanced usage, where the `classmethod` implementation can be written separately. If the class's *metaclass* has a method of the same name as the `sharedmethod`, the version on the metaclass is delegated to:: >>> class ExampleMeta(type): ... def identify(self): ... print('this implements the {0}.identify ' ... 'classmethod'.format(self.__name__)) ... >>> class Example(metaclass=ExampleMeta): ... @sharedmethod ... def identify(self): ... print('this implements the instancemethod') ... 
>>> Example().identify() this implements the instancemethod >>> Example.identify() this implements the Example.identify classmethod """ def __get__(self, obj, objtype=None): if obj is None: mcls = type(objtype) clsmeth = getattr(mcls, self.__func__.__name__, None) if callable(clsmeth): func = clsmeth else: func = self.__func__ return self._make_method(func, objtype) else: return self._make_method(self.__func__, obj) @staticmethod def _make_method(func, instance): return types.MethodType(func, instance) def format_doc(docstring, *args, **kwargs): """ Replaces the docstring of the decorated object and then formats it. The formatting works like :meth:`str.format` and if the decorated object already has a docstring this docstring can be included in the new documentation if you use the ``{__doc__}`` placeholder. Its primary use is for reusing a *long* docstring in multiple functions when it is the same or only slightly different between them. Parameters ---------- docstring : str or object or None The docstring that will replace the docstring of the decorated object. If it is an object like a function or class it will take the docstring of this object. If it is a string it will use the string itself. One special case is if the string is ``None`` then it will use the decorated functions docstring and formats it. args : passed to :meth:`str.format`. kwargs : passed to :meth:`str.format`. If the function has a (not empty) docstring the original docstring is added to the kwargs with the keyword ``'__doc__'``. Raises ------ ValueError If the ``docstring`` (or interpreted docstring if it was ``None`` or not a string) is empty. IndexError, KeyError If a placeholder in the (interpreted) ``docstring`` was not filled. see :meth:`str.format` for more information. Notes ----- Using this decorator allows, for example Sphinx, to parse the correct docstring. Examples -------- Replacing the current docstring is very easy:: >>> from astropy.utils.decorators import format_doc >>> @format_doc('''Perform num1 + num2''') ... def add(num1, num2): ... return num1+num2 ... >>> help(add) # doctest: +SKIP Help on function add in module __main__: <BLANKLINE> add(num1, num2) Perform num1 + num2 sometimes instead of replacing you only want to add to it:: >>> doc = ''' ... {__doc__} ... Parameters ... ---------- ... num1, num2 : Numbers ... Returns ... ------- ... result: Number ... ''' >>> @format_doc(doc) ... def add(num1, num2): ... '''Perform addition.''' ... return num1+num2 ... >>> help(add) # doctest: +SKIP Help on function add in module __main__: <BLANKLINE> add(num1, num2) Perform addition. Parameters ---------- num1, num2 : Numbers Returns ------- result : Number in case one might want to format it further:: >>> doc = ''' ... Perform {0}. ... Parameters ... ---------- ... num1, num2 : Numbers ... Returns ... ------- ... result: Number ... result of num1 {op} num2 ... {__doc__} ... ''' >>> @format_doc(doc, 'addition', op='+') ... def add(num1, num2): ... return num1+num2 ... >>> @format_doc(doc, 'subtraction', op='-') ... def subtract(num1, num2): ... '''Notes: This one has additional notes.''' ... return num1-num2 ... >>> help(add) # doctest: +SKIP Help on function add in module __main__: <BLANKLINE> add(num1, num2) Perform addition. Parameters ---------- num1, num2 : Numbers Returns ------- result : Number result of num1 + num2 >>> help(subtract) # doctest: +SKIP Help on function subtract in module __main__: <BLANKLINE> subtract(num1, num2) Perform subtraction. 
Parameters ---------- num1, num2 : Numbers Returns ------- result : Number result of num1 - num2 Notes : This one has additional notes. These methods can be combined; even taking the docstring from another object is possible as docstring attribute. You just have to specify the object:: >>> @format_doc(add) ... def another_add(num1, num2): ... return num1 + num2 ... >>> help(another_add) # doctest: +SKIP Help on function another_add in module __main__: <BLANKLINE> another_add(num1, num2) Perform addition. Parameters ---------- num1, num2 : Numbers Returns ------- result : Number result of num1 + num2 But be aware that this decorator *only* formats the given docstring not the strings passed as ``args`` or ``kwargs`` (not even the original docstring):: >>> @format_doc(doc, 'addition', op='+') ... def yet_another_add(num1, num2): ... '''This one is good for {0}.''' ... return num1 + num2 ... >>> help(yet_another_add) # doctest: +SKIP Help on function yet_another_add in module __main__: <BLANKLINE> yet_another_add(num1, num2) Perform addition. Parameters ---------- num1, num2 : Numbers Returns ------- result : Number result of num1 + num2 This one is good for {0}. To work around it you could specify the docstring to be ``None``:: >>> @format_doc(None, 'addition') ... def last_add_i_swear(num1, num2): ... '''This one is good for {0}.''' ... return num1 + num2 ... >>> help(last_add_i_swear) # doctest: +SKIP Help on function last_add_i_swear in module __main__: <BLANKLINE> last_add_i_swear(num1, num2) This one is good for addition. Using it with ``None`` as docstring allows to use the decorator twice on an object to first parse the new docstring and then to parse the original docstring or the ``args`` and ``kwargs``. """ def set_docstring(obj): if docstring is None: # None means: use the objects __doc__ doc = obj.__doc__ # Delete documentation in this case so we don't end up with # awkwardly self-inserted docs. obj.__doc__ = None elif isinstance(docstring, str): # String: use the string that was given doc = docstring else: # Something else: Use the __doc__ of this doc = docstring.__doc__ if not doc: # In case the docstring is empty it's probably not what was wanted. raise ValueError('docstring must be a string or containing a ' 'docstring that is not empty.') # If the original has a not-empty docstring append it to the format # kwargs. kwargs['__doc__'] = obj.__doc__ or '' obj.__doc__ = doc.format(*args, **kwargs) return obj return set_docstring
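
# --- Illustrative usage (added for exposition; not part of the original
# module). A minimal sketch of ``deprecated_renamed_argument`` and
# ``lazyproperty`` as defined above; ``load`` and ``Catalog`` are
# hypothetical names. Assuming this file lives at
# ``astropy/utils/decorators.py``, it can be run with
# ``python -m astropy.utils.decorators``.
if __name__ == '__main__':
    @deprecated_renamed_argument('fname', 'filename', '5.0')
    def load(filename):
        return filename

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        # The old keyword is forwarded to ``filename`` and an
        # AstropyDeprecationWarning is emitted.
        assert load(fname='data.fits') == 'data.fits'
        assert issubclass(caught[0].category, AstropyDeprecationWarning)

    class Catalog:
        @lazyproperty
        def size(self):
            print('computing size once')
            return 42

    cat = Catalog()
    assert cat.size == 42  # first access: computes and caches in __dict__
    assert cat.size == 42  # second access: served from the cache, no print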
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """This module contains functions and methods that relate to the DataInfo class which provides a container for informational attributes as well as summary info methods. A DataInfo object is attached to the Quantity, SkyCoord, and Time classes in astropy. Here it allows those classes to be used in Tables and uniformly carry table column attributes such as name, format, dtype, meta, and description. """ # Note: these functions and classes are tested extensively in astropy table # tests via their use in providing mixin column info, and in # astropy/tests/test_info for providing table and column info summary data. import os import re import sys import weakref import warnings from io import StringIO from copy import deepcopy from functools import partial from collections import OrderedDict from contextlib import contextmanager import numpy as np from . import metadata __all__ = ['data_info_factory', 'dtype_info_name', 'BaseColumnInfo', 'DataInfo', 'MixinInfo', 'ParentDtypeInfo'] # Tuple of filterwarnings kwargs to ignore when calling info IGNORE_WARNINGS = (dict(category=RuntimeWarning, message='All-NaN|' 'Mean of empty slice|Degrees of freedom <= 0|' 'invalid value encountered in sqrt'),) @contextmanager def serialize_context_as(context): """Set context for serialization. This will allow downstream code to understand the context in which a column is being serialized. Objects like Time or SkyCoord will have different default serialization representations depending on context. Parameters ---------- context : str Context name, e.g. 'fits', 'hdf5', 'parquet', 'ecsv', 'yaml' """ old_context = BaseColumnInfo._serialize_context BaseColumnInfo._serialize_context = context try: yield finally: BaseColumnInfo._serialize_context = old_context def dtype_info_name(dtype): """Return a human-oriented string name of the ``dtype`` arg. This can be use by astropy methods that present type information about a data object. The output is mostly equivalent to ``dtype.name`` which takes the form <type_name>[B] where <type_name> is like ``int`` or ``bool`` and [B] is an optional number of bits which gets included only for numeric types. The output is shown below for ``bytes`` and ``str`` types, with <N> being the number of characters. This representation corresponds to the Python type that matches the dtype:: Numpy S<N> U<N> Python bytes<N> str<N> Parameters ---------- dtype : str, `~numpy.dtype`, type Input as an object that can be converted via :class:`numpy.dtype`. Returns ------- dtype_info_name : str String name of ``dtype`` """ dtype = np.dtype(dtype) if dtype.names is not None: return '({})'.format(', '.join(dtype_info_name(dt[0]) for dt in dtype.fields.values())) if dtype.subdtype is not None: dtype, shape = dtype.subdtype else: shape = () if dtype.kind in ('S', 'U'): type_name = 'bytes' if dtype.kind == 'S' else 'str' length = re.search(r'(\d+)', dtype.str).group(1) out = type_name + length else: out = dtype.name if shape: out += f"[{','.join(str(n) for n in shape)}]" return out def data_info_factory(names, funcs): """ Factory to create a function that can be used as an ``option`` for outputting data object summary information. Examples -------- >>> from astropy.utils.data_info import data_info_factory >>> from astropy.table import Column >>> c = Column([4., 3., 2., 1.]) >>> mystats = data_info_factory(names=['min', 'median', 'max'], ... 
funcs=[np.min, np.median, np.max]) >>> c.info(option=mystats) min = 1 median = 2.5 max = 4 n_bad = 0 length = 4 Parameters ---------- names : list List of information attribute names funcs : list List of functions that compute the corresponding information attribute Returns ------- func : function Function that can be used as a data info option """ def func(dat): outs = [] for name, func in zip(names, funcs): try: if isinstance(func, str): out = getattr(dat, func)() else: out = func(dat) except Exception: outs.append('--') else: try: outs.append(f'{out:g}') except (TypeError, ValueError): outs.append(str(out)) return OrderedDict(zip(names, outs)) return func def _get_obj_attrs_map(obj, attrs): """ Get the values for object ``attrs`` and return as a dict. This ignores any attributes that are None. In the context of serializing the supported core astropy classes this conversion will succeed and results in more succinct and less python-specific YAML. """ out = {} for attr in attrs: val = getattr(obj, attr, None) if val is not None: out[attr] = val return out def _get_data_attribute(dat, attr=None): """ Get a data object attribute for the ``attributes`` info summary method """ if attr == 'class': val = type(dat).__name__ elif attr == 'dtype': val = dtype_info_name(dat.info.dtype) elif attr == 'shape': datshape = dat.shape[1:] val = datshape if datshape else '' else: val = getattr(dat.info, attr) if val is None: val = '' return str(val) class InfoAttribute: def __init__(self, attr, default=None): self.attr = attr self.default = default def __get__(self, instance, owner_cls): if instance is None: return self return instance._attrs.get(self.attr, self.default) def __set__(self, instance, value): if instance is None: # This is an unbound descriptor on the class raise ValueError('cannot set unbound descriptor') instance._attrs[self.attr] = value class ParentAttribute: def __init__(self, attr): self.attr = attr def __get__(self, instance, owner_cls): if instance is None: return self return getattr(instance._parent, self.attr) def __set__(self, instance, value): if instance is None: # This is an unbound descriptor on the class raise ValueError('cannot set unbound descriptor') setattr(instance._parent, self.attr, value) class DataInfoMeta(type): def __new__(mcls, name, bases, dct): # Ensure that we do not gain a __dict__, which would mean # arbitrary attributes could be set. dct.setdefault('__slots__', []) return super().__new__(mcls, name, bases, dct) def __init__(cls, name, bases, dct): super().__init__(name, bases, dct) # Define default getters/setters for attributes, if needed. for attr in cls.attr_names: if attr not in dct: # If not defined explicitly for this class, did any of # its superclasses define it, and, if so, was this an # automatically defined look-up-on-parent attribute? cls_attr = getattr(cls, attr, None) if attr in cls.attrs_from_parent: # If the attribute is supposed to be stored on the parent, # and that is stated by this class yet it was not the case # on the superclass, override it. 
if 'attrs_from_parent' in dct and not isinstance(cls_attr, ParentAttribute): setattr(cls, attr, ParentAttribute(attr)) elif not cls_attr or isinstance(cls_attr, ParentAttribute): # If the attribute is not meant to be stored on the parent, # and if it was not defined already or was previously defined # as an attribute on the parent, define a regular # look-up-on-info attribute setattr(cls, attr, InfoAttribute(attr, cls._attr_defaults.get(attr))) class DataInfo(metaclass=DataInfoMeta): """ Descriptor that data classes use to add an ``info`` attribute for storing data attributes in a uniform and portable way. Note that it *must* be called ``info`` so that the DataInfo() object can be stored in the ``instance`` using the ``info`` key. Because owner_cls.x is a descriptor, Python doesn't use __dict__['x'] normally, and the descriptor can safely store stuff there. Thanks to https://nbviewer.jupyter.org/urls/gist.github.com/ChrisBeaumont/5758381/raw/descriptor_writeup.ipynb for this trick that works for non-hashable classes. Parameters ---------- bound : bool If True this is a descriptor attribute in a class definition, else it is a DataInfo() object that is bound to a data object instance. Default is False. """ _stats = ['mean', 'std', 'min', 'max'] attrs_from_parent = set() attr_names = set(['name', 'unit', 'dtype', 'format', 'description', 'meta']) _attr_defaults = {'dtype': np.dtype('O')} _attrs_no_copy = set() _info_summary_attrs = ('dtype', 'shape', 'unit', 'format', 'description', 'class') __slots__ = ['_parent_cls', '_parent_ref', '_attrs'] # This specifies the list of object attributes which must be stored in # order to re-create the object after serialization. This is independent # of normal `info` attributes like name or description. Subclasses will # generally either define this statically (QuantityInfo) or dynamically # (SkyCoordInfo). These attributes may be scalars or arrays. If arrays # that match the object length they will be serialized as an independent # column. _represent_as_dict_attrs = () # This specifies attributes which are to be provided to the class # initializer as ordered args instead of keyword args. This is needed # for Quantity subclasses where the keyword for data varies (e.g. # between Quantity and Angle). _construct_from_dict_args = () # This specifies the name of an attribute which is the "primary" data. # Then when representing as columns # (table.serialize._represent_mixin_as_column) the output for this # attribute will be written with the just name of the mixin instead of the # usual "<name>.<attr>". _represent_as_dict_primary_data = None def __init__(self, bound=False): # If bound to a data object instance then create the dict of attributes # which stores the info attribute values. Default of None for "unset" # except for dtype where the default is object. if bound: self._attrs = {} @property def _parent(self): try: parent = self._parent_ref() except AttributeError: return None if parent is None: raise AttributeError("""\ failed to access "info" attribute on a temporary object. It looks like you have done something like ``col[3:5].info`` or ``col.quantity.info``, i.e. you accessed ``info`` from a temporary slice object that only exists momentarily. This has failed because the reference to that temporary object is now lost. Instead force a permanent reference (e.g. 
``c = col[3:5]`` followed by ``c.info``).""") return parent def __get__(self, instance, owner_cls): if instance is None: # This is an unbound descriptor on the class self._parent_cls = owner_cls return self info = instance.__dict__.get('info') if info is None: info = instance.__dict__['info'] = self.__class__(bound=True) # We set _parent_ref on every call, since if one makes copies of # instances, 'info' will be copied as well, which will lose the # reference. info._parent_ref = weakref.ref(instance) return info def __set__(self, instance, value): if instance is None: # This is an unbound descriptor on the class raise ValueError('cannot set unbound descriptor') if isinstance(value, DataInfo): info = instance.__dict__['info'] = self.__class__(bound=True) attr_names = info.attr_names if value.__class__ is self.__class__: # For same class, attributes are guaranteed to be stored in # _attrs, so speed matters up by not accessing defaults. # Doing this before difference in for loop helps speed. attr_names = attr_names & set(value._attrs) # NOT in-place! else: # For different classes, copy over the attributes in common. attr_names = attr_names & (value.attr_names - value._attrs_no_copy) for attr in attr_names - info.attrs_from_parent - info._attrs_no_copy: info._attrs[attr] = deepcopy(getattr(value, attr)) else: raise TypeError('info must be set with a DataInfo instance') def __getstate__(self): return self._attrs def __setstate__(self, state): self._attrs = state def _represent_as_dict(self, attrs=None): """Get the values for the parent ``attrs`` and return as a dict. By default, uses '_represent_as_dict_attrs'. """ if attrs is None: attrs = self._represent_as_dict_attrs return _get_obj_attrs_map(self._parent, attrs) def _construct_from_dict(self, map): args = [map.pop(attr) for attr in self._construct_from_dict_args] return self._parent_cls(*args, **map) info_summary_attributes = staticmethod( data_info_factory(names=_info_summary_attrs, funcs=[partial(_get_data_attribute, attr=attr) for attr in _info_summary_attrs])) # No nan* methods in numpy < 1.8 info_summary_stats = staticmethod( data_info_factory(names=_stats, funcs=[getattr(np, 'nan' + stat) for stat in _stats])) def __call__(self, option='attributes', out=''): """ Write summary information about data object to the ``out`` filehandle. By default this prints to standard output via sys.stdout. The ``option`` argument specifies what type of information to include. This can be a string, a function, or a list of strings or functions. Built-in options are: - ``attributes``: data object attributes like ``dtype`` and ``format`` - ``stats``: basic statistics: min, mean, and max If a function is specified then that function will be called with the data object as its single argument. The function must return an OrderedDict containing the information attributes. If a list is provided then the information attributes will be appended for each of the options, in order. Examples -------- >>> from astropy.table import Column >>> c = Column([1, 2], unit='m', dtype='int32') >>> c.info() dtype = int32 unit = m class = Column n_bad = 0 length = 2 >>> c.info(['attributes', 'stats']) dtype = int32 unit = m class = Column mean = 1.5 std = 0.5 min = 1 max = 2 n_bad = 0 length = 2 Parameters ---------- option : str, callable, list of (str or callable) Info option, defaults to 'attributes'. out : file-like, None Output destination, defaults to sys.stdout. 
If None then the OrderedDict with information attributes is returned Returns ------- info : `~collections.OrderedDict` or None `~collections.OrderedDict` if out==None else None """ if out == '': out = sys.stdout dat = self._parent info = OrderedDict() name = dat.info.name if name is not None: info['name'] = name options = option if isinstance(option, (list, tuple)) else [option] for option in options: if isinstance(option, str): if hasattr(self, 'info_summary_' + option): option = getattr(self, 'info_summary_' + option) else: raise ValueError('option={} is not an allowed information type' .format(option)) with warnings.catch_warnings(): for ignore_kwargs in IGNORE_WARNINGS: warnings.filterwarnings('ignore', **ignore_kwargs) info.update(option(dat)) if hasattr(dat, 'mask'): n_bad = np.count_nonzero(dat.mask) else: try: n_bad = np.count_nonzero(np.isinf(dat) | np.isnan(dat)) except Exception: n_bad = 0 info['n_bad'] = n_bad try: info['length'] = len(dat) except (TypeError, IndexError): pass if out is None: return info for key, val in info.items(): if val != '': out.write(f'{key} = {val}' + os.linesep) def __repr__(self): if self._parent is None: return super().__repr__() out = StringIO() self.__call__(out=out) return out.getvalue() class BaseColumnInfo(DataInfo): """ Base info class for anything that can be a column in an astropy Table. There are at least two classes that inherit from this: ColumnInfo: for native astropy Column / MaskedColumn objects MixinInfo: for mixin column objects Note that this class is defined here so that mixins can use it without importing the table package. """ attr_names = DataInfo.attr_names | {'parent_table', 'indices'} _attrs_no_copy = set(['parent_table', 'indices']) # Context for serialization. This can be set temporarily via # ``serialize_context_as(context)`` context manager to allow downstream # code to understand the context in which a column is being serialized. # Typical values are 'fits', 'hdf5', 'parquet', 'ecsv', 'yaml'. Objects # like Time or SkyCoord will have different default serialization # representations depending on context. _serialize_context = None __slots__ = ['_format_funcs', '_copy_indices'] @property def parent_table(self): value = self._attrs.get('parent_table') if callable(value): value = value() return value @parent_table.setter def parent_table(self, parent_table): if parent_table is None: self._attrs.pop('parent_table', None) else: parent_table = weakref.ref(parent_table) self._attrs['parent_table'] = parent_table def __init__(self, bound=False): super().__init__(bound=bound) # If bound to a data object instance then add a _format_funcs dict # for caching functions for print formatting. if bound: self._format_funcs = {} def __set__(self, instance, value): # For Table columns do not set `info` when the instance is a scalar. try: if not instance.shape: return except AttributeError: pass super().__set__(instance, value) def iter_str_vals(self): """ This is a mixin-safe version of Column.iter_str_vals. 
""" col = self._parent if self.parent_table is None: from astropy.table.column import FORMATTER as formatter else: formatter = self.parent_table.formatter _pformat_col_iter = formatter._pformat_col_iter for str_val in _pformat_col_iter(col, -1, False, False, {}): yield str_val @property def indices(self): # Implementation note: the auto-generation as an InfoAttribute cannot # be used here, since on access, one should not just return the # default (empty list is this case), but set _attrs['indices'] so that # if the list is appended to, it is registered here. return self._attrs.setdefault('indices', []) @indices.setter def indices(self, indices): self._attrs['indices'] = indices def adjust_indices(self, index, value, col_len): ''' Adjust info indices after column modification. Parameters ---------- index : slice, int, list, or ndarray Element(s) of column to modify. This parameter can be a single row number, a list of row numbers, an ndarray of row numbers, a boolean ndarray (a mask), or a column slice. value : int, list, or ndarray New value(s) to insert col_len : int Length of the column ''' if not self.indices: return if isinstance(index, slice): # run through each key in slice t = index.indices(col_len) keys = list(range(*t)) elif isinstance(index, np.ndarray) and index.dtype.kind == 'b': # boolean mask keys = np.where(index)[0] else: # single int keys = [index] value = np.atleast_1d(value) # turn array(x) into array([x]) if value.size == 1: # repeat single value value = list(value) * len(keys) for key, val in zip(keys, value): for col_index in self.indices: col_index.replace(key, self.name, val) def slice_indices(self, col_slice, item, col_len): ''' Given a sliced object, modify its indices to correctly represent the slice. Parameters ---------- col_slice : `~astropy.table.Column` or mixin Sliced object. If not a column, it must be a valid mixin, see https://docs.astropy.org/en/stable/table/mixin_columns.html item : slice, list, or ndarray Slice used to create col_slice col_len : int Length of original object ''' from astropy.table.sorted_array import SortedArray if not getattr(self, '_copy_indices', True): # Necessary because MaskedArray will perform a shallow copy col_slice.info.indices = [] return col_slice elif isinstance(item, slice): col_slice.info.indices = [x[item] for x in self.indices] elif self.indices: if isinstance(item, np.ndarray) and item.dtype.kind == 'b': # boolean mask item = np.where(item)[0] # Empirical testing suggests that recreating a BST/RBT index is # more effective than relabelling when less than ~60% of # the total number of rows are involved, and is in general # more effective for SortedArray. small = len(item) <= 0.6 * col_len col_slice.info.indices = [] for index in self.indices: if small or isinstance(index, SortedArray): new_index = index.get_slice(col_slice, item) else: new_index = deepcopy(index) new_index.replace_rows(item) col_slice.info.indices.append(new_index) return col_slice @staticmethod def merge_cols_attributes(cols, metadata_conflicts, name, attrs): """ Utility method to merge and validate the attributes ``attrs`` for the input table columns ``cols``. Note that ``dtype`` and ``shape`` attributes are handled specially. These should not be passed in ``attrs`` but will always be in the returned dict of merged attributes. 
Parameters ---------- cols : list List of input Table column objects metadata_conflicts : str ('warn'|'error'|'silent') How to handle metadata conflicts name : str Output column name attrs : list List of attribute names to be merged Returns ------- attrs : dict Of merged attributes. """ from astropy.table.np_utils import TableMergeError def warn_str_func(key, left, right): out = ("In merged column '{}' the '{}' attribute does not match " "({} != {}). Using {} for merged output" .format(name, key, left, right, right)) return out def getattrs(col): return {attr: getattr(col.info, attr) for attr in attrs if getattr(col.info, attr, None) is not None} out = getattrs(cols[0]) for col in cols[1:]: out = metadata.merge(out, getattrs(col), metadata_conflicts=metadata_conflicts, warn_str_func=warn_str_func) # Output dtype is the superset of all dtypes in in_cols out['dtype'] = metadata.common_dtype(cols) # Make sure all input shapes are the same uniq_shapes = set(col.shape[1:] for col in cols) if len(uniq_shapes) != 1: raise TableMergeError('columns have different shapes') out['shape'] = uniq_shapes.pop() # "Merged" output name is the supplied name if name is not None: out['name'] = name return out def get_sortable_arrays(self): """ Return a list of arrays which can be lexically sorted to represent the order of the parent column. The base method raises NotImplementedError and must be overridden. Returns ------- arrays : list of ndarray """ raise NotImplementedError(f'column {self.name} is not sortable') class MixinInfo(BaseColumnInfo): @property def name(self): return self._attrs.get('name') @name.setter def name(self, name): # For mixin columns that live within a table, rename the column in the # table when setting the name attribute. This mirrors the same # functionality in the BaseColumn class. if self.parent_table is not None: new_name = None if name is None else str(name) self.parent_table.columns._rename_column(self.name, new_name) self._attrs['name'] = name @property def groups(self): # This implementation for mixin columns essentially matches the Column # property definition. `groups` is a read-only property here and # depends on the parent table of the column having `groups`. This will # allow aggregating mixins as long as they support those operations. from astropy.table import groups return self._attrs.setdefault('groups', groups.ColumnGroups(self._parent)) class ParentDtypeInfo(MixinInfo): """Mixin that gets info.dtype from parent""" attrs_from_parent = set(['dtype']) # dtype and unit taken from parent
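
# --- Illustrative usage (added for exposition; not part of the original
# module). A quick check of the helpers above: ``dtype_info_name`` maps
# dtypes to the human-oriented names shown in ``info`` summaries, and
# ``serialize_context_as`` temporarily sets the serialization context that
# downstream column code can inspect. Assuming this file lives at
# ``astropy/utils/data_info.py``, it can be run with
# ``python -m astropy.utils.data_info``.
if __name__ == '__main__':
    assert dtype_info_name(np.dtype('U5')) == 'str5'
    assert dtype_info_name(np.dtype('S3')) == 'bytes3'
    assert dtype_info_name('float64') == 'float64'
    # Sub-array dtypes carry their shape in the name.
    assert dtype_info_name(np.dtype(('f8', (2, 3)))) == 'float64[2,3]'

    assert BaseColumnInfo._serialize_context is None
    with serialize_context_as('fits'):
        assert BaseColumnInfo._serialize_context == 'fits'
    assert BaseColumnInfo._serialize_context is None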
# Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np from abc import ABCMeta, abstractmethod from copy import deepcopy import weakref # from astropy.utils.compat import ignored from astropy import log from astropy.units import Unit, Quantity, UnitConversionError __all__ = ['MissingDataAssociationException', 'IncompatibleUncertaintiesException', 'NDUncertainty', 'StdDevUncertainty', 'UnknownUncertainty', 'VarianceUncertainty', 'InverseVariance'] class IncompatibleUncertaintiesException(Exception): """This exception should be used to indicate cases in which uncertainties with two different classes can not be propagated. """ class MissingDataAssociationException(Exception): """This exception should be used to indicate that an uncertainty instance has not been associated with a parent `~astropy.nddata.NDData` object. """ class NDUncertainty(metaclass=ABCMeta): """This is the metaclass for uncertainty classes used with `NDData`. Parameters ---------- array : any type, optional The array or value (the parameter name is due to historical reasons) of the uncertainty. `numpy.ndarray`, `~astropy.units.Quantity` or `NDUncertainty` subclasses are recommended. If the `array` is `list`-like or `numpy.ndarray`-like it will be cast to a plain `numpy.ndarray`. Default is ``None``. unit : unit-like, optional Unit for the uncertainty ``array``. Strings that can be converted to a `~astropy.units.Unit` are allowed. Default is ``None``. copy : `bool`, optional Indicates whether to save the `array` as a copy. ``True`` copies it before saving, while ``False`` tries to save every parameter as reference. Note however that it is not always possible to save the input as reference. Default is ``True``. Raises ------ IncompatibleUncertaintiesException If given another `NDUncertainty`-like class as ``array`` if their ``uncertainty_type`` is different. """ def __init__(self, array=None, copy=True, unit=None): if isinstance(array, NDUncertainty): # Given an NDUncertainty class or subclass check that the type # is the same. if array.uncertainty_type != self.uncertainty_type: raise IncompatibleUncertaintiesException # Check if two units are given and take the explicit one then. if (unit is not None and unit != array._unit): # TODO : Clarify it (see NDData.init for same problem)? log.info("overwriting Uncertainty's current " "unit with specified unit.") elif array._unit is not None: unit = array.unit array = array.array elif isinstance(array, Quantity): # Check if two units are given and take the explicit one then. if (unit is not None and array.unit is not None and unit != array.unit): log.info("overwriting Quantity's current " "unit with specified unit.") elif array.unit is not None: unit = array.unit array = array.value if unit is None: self._unit = None else: self._unit = Unit(unit) if copy: array = deepcopy(array) unit = deepcopy(unit) self.array = array self.parent_nddata = None # no associated NDData - until it is set! @property @abstractmethod def uncertainty_type(self): """`str` : Short description of the type of uncertainty. Defined as abstract property so subclasses *have* to override this. """ return None @property def supports_correlated(self): """`bool` : Supports uncertainty propagation with correlated \ uncertainties? .. versionadded:: 1.2 """ return False @property def array(self): """`numpy.ndarray` : the uncertainty's value. 
""" return self._array @array.setter def array(self, value): if isinstance(value, (list, np.ndarray)): value = np.array(value, subok=False, copy=False) self._array = value @property def unit(self): """`~astropy.units.Unit` : The unit of the uncertainty, if any. """ return self._unit @unit.setter def unit(self, value): """ The unit should be set to a value consistent with the parent NDData unit and the uncertainty type. """ if value is not None: # Check the hidden attribute below, not the property. The property # raises an exception if there is no parent_nddata. if self._parent_nddata is not None: parent_unit = self.parent_nddata.unit try: # Check for consistency with the unit of the parent_nddata self._data_unit_to_uncertainty_unit(parent_unit).to(value) except UnitConversionError: raise UnitConversionError("Unit {} is incompatible " "with unit {} of parent " "nddata".format(value, parent_unit)) self._unit = Unit(value) else: self._unit = value @property def quantity(self): """ This uncertainty as an `~astropy.units.Quantity` object. """ return Quantity(self.array, self.unit, copy=False, dtype=self.array.dtype) @property def parent_nddata(self): """`NDData` : reference to `NDData` instance with this uncertainty. In case the reference is not set uncertainty propagation will not be possible since propagation might need the uncertain data besides the uncertainty. """ no_parent_message = "uncertainty is not associated with an NDData object" parent_lost_message = ( "the associated NDData object was deleted and cannot be accessed " "anymore. You can prevent the NDData object from being deleted by " "assigning it to a variable. If this happened after unpickling " "make sure you pickle the parent not the uncertainty directly." ) try: parent = self._parent_nddata except AttributeError: raise MissingDataAssociationException(no_parent_message) else: if parent is None: raise MissingDataAssociationException(no_parent_message) else: # The NDData is saved as weak reference so we must call it # to get the object the reference points to. However because # we have a weak reference here it's possible that the parent # was deleted because its reference count dropped to zero. if isinstance(self._parent_nddata, weakref.ref): resolved_parent = self._parent_nddata() if resolved_parent is None: log.info(parent_lost_message) return resolved_parent else: log.info("parent_nddata should be a weakref to an NDData " "object.") return self._parent_nddata @parent_nddata.setter def parent_nddata(self, value): if value is not None and not isinstance(value, weakref.ref): # Save a weak reference on the uncertainty that points to this # instance of NDData. Direct references should NOT be used: # https://github.com/astropy/astropy/pull/4799#discussion_r61236832 value = weakref.ref(value) # Set _parent_nddata here and access below with the property because value # is a weakref self._parent_nddata = value # set uncertainty unit to that of the parent if it was not already set, unless initializing # with empty parent (Value=None) if value is not None: parent_unit = self.parent_nddata.unit if self.unit is None: if parent_unit is None: self.unit = None else: # Set the uncertainty's unit to the appropriate value self.unit = self._data_unit_to_uncertainty_unit(parent_unit) else: # Check that units of uncertainty are compatible with those of # the parent. If they are, no need to change units of the # uncertainty or the data. If they are not, let the user know. 
unit_from_data = self._data_unit_to_uncertainty_unit(parent_unit) try: unit_from_data.to(self.unit) except UnitConversionError: raise UnitConversionError("Unit {} of uncertainty " "incompatible with unit {} of " "data".format(self.unit, parent_unit)) @abstractmethod def _data_unit_to_uncertainty_unit(self, value): """ Subclasses must override this property. It should take in a data unit and return the correct unit for the uncertainty given the uncertainty type. """ return None def __repr__(self): prefix = self.__class__.__name__ + '(' try: body = np.array2string(self.array, separator=', ', prefix=prefix) except AttributeError: # In case it wasn't possible to use array2string body = str(self.array) return ''.join([prefix, body, ')']) def __getstate__(self): # Because of the weak reference the class wouldn't be picklable. try: return self._array, self._unit, self.parent_nddata except MissingDataAssociationException: # In case there's no parent return self._array, self._unit, None def __setstate__(self, state): if len(state) != 3: raise TypeError('The state should contain 3 items.') self._array = state[0] self._unit = state[1] parent = state[2] if parent is not None: parent = weakref.ref(parent) self._parent_nddata = parent def __getitem__(self, item): """Normal slicing on the array, keep the unit and return a reference. """ return self.__class__(self.array[item], unit=self.unit, copy=False) def propagate(self, operation, other_nddata, result_data, correlation): """Calculate the resulting uncertainty given an operation on the data. .. versionadded:: 1.2 Parameters ---------- operation : callable The operation that is performed on the `NDData`. Supported are `numpy.add`, `numpy.subtract`, `numpy.multiply` and `numpy.true_divide` (or `numpy.divide`). other_nddata : `NDData` instance The second operand in the arithmetic operation. result_data : `~astropy.units.Quantity` or ndarray The result of the arithmetic operations on the data. correlation : `numpy.ndarray` or number The correlation (rho) is defined between the uncertainties in sigma_AB = sigma_A * sigma_B * rho. A value of ``0`` means uncorrelated operands. Returns ------- resulting_uncertainty : `NDUncertainty` instance Another instance of the same `NDUncertainty` subclass containing the uncertainty of the result. Raises ------ ValueError If the ``operation`` is not supported or if correlation is not zero but the subclass does not support correlated uncertainties. Notes ----- First this method checks if a correlation is given and the subclass implements propagation with correlated uncertainties. Then the second uncertainty is converted (or an Exception is raised) to the same class in order to do the propagation. Then the appropriate propagation method is invoked and the result is returned. """ # Check if the subclass supports correlation if not self.supports_correlated: if isinstance(correlation, np.ndarray) or correlation != 0: raise ValueError("{} does not support uncertainty propagation" " with correlation." 
"".format(self.__class__.__name__)) # Get the other uncertainty (and convert it to a matching one) other_uncert = self._convert_uncertainty(other_nddata.uncertainty) if operation.__name__ == 'add': result = self._propagate_add(other_uncert, result_data, correlation) elif operation.__name__ == 'subtract': result = self._propagate_subtract(other_uncert, result_data, correlation) elif operation.__name__ == 'multiply': result = self._propagate_multiply(other_uncert, result_data, correlation) elif operation.__name__ in ['true_divide', 'divide']: result = self._propagate_divide(other_uncert, result_data, correlation) else: raise ValueError('unsupported operation') return self.__class__(result, copy=False) def _convert_uncertainty(self, other_uncert): """Checks if the uncertainties are compatible for propagation. Checks if the other uncertainty is `NDUncertainty`-like and if so verify that the uncertainty_type is equal. If the latter is not the case try returning ``self.__class__(other_uncert)``. Parameters ---------- other_uncert : `NDUncertainty` subclass The other uncertainty. Returns ------- other_uncert : `NDUncertainty` subclass but converted to a compatible `NDUncertainty` subclass if possible and necessary. Raises ------ IncompatibleUncertaintiesException: If the other uncertainty cannot be converted to a compatible `NDUncertainty` subclass. """ if isinstance(other_uncert, NDUncertainty): if self.uncertainty_type == other_uncert.uncertainty_type: return other_uncert else: return self.__class__(other_uncert) else: raise IncompatibleUncertaintiesException @abstractmethod def _propagate_add(self, other_uncert, result_data, correlation): return None @abstractmethod def _propagate_subtract(self, other_uncert, result_data, correlation): return None @abstractmethod def _propagate_multiply(self, other_uncert, result_data, correlation): return None @abstractmethod def _propagate_divide(self, other_uncert, result_data, correlation): return None def represent_as(self, other_uncert): """Convert this uncertainty to a different uncertainty type. Parameters ---------- other_uncert : `NDUncertainty` subclass The `NDUncertainty` subclass to convert to. Returns ------- resulting_uncertainty : `NDUncertainty` instance An instance of ``other_uncert`` subclass containing the uncertainty converted to the new uncertainty type. Raises ------ TypeError If either the initial or final subclasses do not support conversion, a `TypeError` is raised. """ as_variance = getattr(self, "_convert_to_variance", None) if as_variance is None: raise TypeError( f"{type(self)} does not support conversion to another " "uncertainty type." ) from_variance = getattr(other_uncert, "_convert_from_variance", None) if from_variance is None: raise TypeError( f"{other_uncert.__name__} does not support conversion from " "another uncertainty type." ) return from_variance(as_variance()) class UnknownUncertainty(NDUncertainty): """This class implements any unknown uncertainty type. The main purpose of having an unknown uncertainty class is to prevent uncertainty propagation. Parameters ---------- args, kwargs : see `NDUncertainty` """ @property def supports_correlated(self): """`False` : Uncertainty propagation is *not* possible for this class. """ return False @property def uncertainty_type(self): """``"unknown"`` : `UnknownUncertainty` implements any unknown \ uncertainty type. """ return 'unknown' def _data_unit_to_uncertainty_unit(self, value): """ No way to convert if uncertainty is unknown. 
""" return None def _convert_uncertainty(self, other_uncert): """Raise an Exception because unknown uncertainty types cannot implement propagation. """ msg = "Uncertainties of unknown type cannot be propagated." raise IncompatibleUncertaintiesException(msg) def _propagate_add(self, other_uncert, result_data, correlation): """Not possible for unknown uncertainty types. """ return None def _propagate_subtract(self, other_uncert, result_data, correlation): return None def _propagate_multiply(self, other_uncert, result_data, correlation): return None def _propagate_divide(self, other_uncert, result_data, correlation): return None class _VariancePropagationMixin: """ Propagation of uncertainties for variances, also used to perform error propagation for variance-like uncertainties (standard deviation and inverse variance). """ def _propagate_add_sub(self, other_uncert, result_data, correlation, subtract=False, to_variance=lambda x: x, from_variance=lambda x: x): """ Error propagation for addition or subtraction of variance or variance-like uncertainties. Uncertainties are calculated using the formulae for variance but can be used for uncertainty convertible to a variance. Parameters ---------- other_uncert : `~astropy.nddata.NDUncertainty` instance The uncertainty, if any, of the other operand. result_data : `~astropy.nddata.NDData` instance The results of the operation on the data. correlation : float or array-like Correlation of the uncertainties. subtract : bool, optional If ``True``, propagate for subtraction, otherwise propagate for addition. to_variance : function, optional Function that will transform the input uncertainties to variance. The default assumes the uncertainty is the variance. from_variance : function, optional Function that will convert from variance to the input uncertainty. The default assumes the uncertainty is the variance. """ if subtract: correlation_sign = -1 else: correlation_sign = 1 try: result_unit_sq = result_data.unit ** 2 except AttributeError: result_unit_sq = None if other_uncert.array is not None: # Formula: sigma**2 = dB if (other_uncert.unit is not None and result_unit_sq != to_variance(other_uncert.unit)): # If the other uncertainty has a unit and this unit differs # from the unit of the result convert it to the results unit other = to_variance(other_uncert.array << other_uncert.unit).to(result_unit_sq).value else: other = to_variance(other_uncert.array) else: other = 0 if self.array is not None: # Formula: sigma**2 = dA if self.unit is not None and to_variance(self.unit) != self.parent_nddata.unit**2: # If the uncertainty has a different unit than the result we # need to convert it to the results unit. this = to_variance(self.array << self.unit).to(result_unit_sq).value else: this = to_variance(self.array) else: this = 0 # Formula: sigma**2 = dA + dB +/- 2*cor*sqrt(dA*dB) # Formula: sigma**2 = sigma_other + sigma_self +/- 2*cor*sqrt(dA*dB) # (sign depends on whether addition or subtraction) # Determine the result depending on the correlation if isinstance(correlation, np.ndarray) or correlation != 0: corr = 2 * correlation * np.sqrt(this * other) result = this + other + correlation_sign * corr else: result = this + other return from_variance(result) def _propagate_multiply_divide(self, other_uncert, result_data, correlation, divide=False, to_variance=lambda x: x, from_variance=lambda x: x): """ Error propagation for multiplication or division of variance or variance-like uncertainties. 
Uncertainties are calculated using the formulae for variance but can be used for uncertainty convertible to a variance. Parameters ---------- other_uncert : `~astropy.nddata.NDUncertainty` instance The uncertainty, if any, of the other operand. result_data : `~astropy.nddata.NDData` instance The results of the operation on the data. correlation : float or array-like Correlation of the uncertainties. divide : bool, optional If ``True``, propagate for division, otherwise propagate for multiplication. to_variance : function, optional Function that will transform the input uncertainties to variance. The default assumes the uncertainty is the variance. from_variance : function, optional Function that will convert from variance to the input uncertainty. The default assumes the uncertainty is the variance. """ # For multiplication we don't need the result as quantity if isinstance(result_data, Quantity): result_data = result_data.value if divide: correlation_sign = -1 else: correlation_sign = 1 if other_uncert.array is not None: # We want the result to have a unit consistent with the parent, so # we only need to convert the unit of the other uncertainty if it # is different from its data's unit. if (other_uncert.unit and to_variance(1 * other_uncert.unit) != ((1 * other_uncert.parent_nddata.unit)**2).unit): d_b = to_variance(other_uncert.array << other_uncert.unit).to( (1 * other_uncert.parent_nddata.unit)**2).value else: d_b = to_variance(other_uncert.array) # Formula: sigma**2 = |A|**2 * d_b right = np.abs(self.parent_nddata.data**2 * d_b) else: right = 0 if self.array is not None: # Just the reversed case if (self.unit and to_variance(1 * self.unit) != ((1 * self.parent_nddata.unit)**2).unit): d_a = to_variance(self.array << self.unit).to( (1 * self.parent_nddata.unit)**2).value else: d_a = to_variance(self.array) # Formula: sigma**2 = |B|**2 * d_a left = np.abs(other_uncert.parent_nddata.data**2 * d_a) else: left = 0 # Multiplication # # The fundamental formula is: # sigma**2 = |AB|**2*(d_a/A**2+d_b/B**2+2*sqrt(d_a)/A*sqrt(d_b)/B*cor) # # This formula is not very handy since it generates NaNs for every # zero in A and B. So we rewrite it: # # Multiplication Formula: # sigma**2 = (d_a*B**2 + d_b*A**2 + (2 * cor * ABsqrt(dAdB))) # sigma**2 = (left + right + (2 * cor * ABsqrt(dAdB))) # # Division # # The fundamental formula for division is: # sigma**2 = |A/B|**2*(d_a/A**2+d_b/B**2-2*sqrt(d_a)/A*sqrt(d_b)/B*cor) # # As with multiplication, it is convenient to rewrite this to avoid # nans where A is zero. # # Division formula (rewritten): # sigma**2 = d_a/B**2 + (A/B)**2 * d_b/B**2 # - 2 * cor * A *sqrt(dAdB) / B**3 # sigma**2 = d_a/B**2 + (A/B)**2 * d_b/B**2 # - 2*cor * sqrt(d_a)/B**2 * sqrt(d_b) * A / B # sigma**2 = multiplication formula/B**4 (and sign change in # the correlation) if isinstance(correlation, np.ndarray) or correlation != 0: corr = (2 * correlation * np.sqrt(d_a * d_b) * self.parent_nddata.data * other_uncert.parent_nddata.data) else: corr = 0 if divide: return from_variance((left + right + correlation_sign * corr) / other_uncert.parent_nddata.data**4) else: return from_variance(left + right + correlation_sign * corr) class StdDevUncertainty(_VariancePropagationMixin, NDUncertainty): """Standard deviation uncertainty assuming first order gaussian error propagation. This class implements uncertainty propagation for ``addition``, ``subtraction``, ``multiplication`` and ``division`` with other instances of `StdDevUncertainty`. 
The class can handle if the uncertainty has a unit that differs from (but is convertible to) the parents `NDData` unit. The unit of the resulting uncertainty will have the same unit as the resulting data. Also support for correlation is possible but requires the correlation as input. It cannot handle correlation determination itself. Parameters ---------- args, kwargs : see `NDUncertainty` Examples -------- `StdDevUncertainty` should always be associated with an `NDData`-like instance, either by creating it during initialization:: >>> from astropy.nddata import NDData, StdDevUncertainty >>> ndd = NDData([1,2,3], unit='m', ... uncertainty=StdDevUncertainty([0.1, 0.1, 0.1])) >>> ndd.uncertainty # doctest: +FLOAT_CMP StdDevUncertainty([0.1, 0.1, 0.1]) or by setting it manually on the `NDData` instance:: >>> ndd.uncertainty = StdDevUncertainty([0.2], unit='m', copy=True) >>> ndd.uncertainty # doctest: +FLOAT_CMP StdDevUncertainty([0.2]) the uncertainty ``array`` can also be set directly:: >>> ndd.uncertainty.array = 2 >>> ndd.uncertainty StdDevUncertainty(2) .. note:: The unit will not be displayed. """ @property def supports_correlated(self): """`True` : `StdDevUncertainty` allows to propagate correlated \ uncertainties. ``correlation`` must be given, this class does not implement computing it by itself. """ return True @property def uncertainty_type(self): """``"std"`` : `StdDevUncertainty` implements standard deviation. """ return 'std' def _convert_uncertainty(self, other_uncert): if isinstance(other_uncert, StdDevUncertainty): return other_uncert else: raise IncompatibleUncertaintiesException def _propagate_add(self, other_uncert, result_data, correlation): return super()._propagate_add_sub(other_uncert, result_data, correlation, subtract=False, to_variance=np.square, from_variance=np.sqrt) def _propagate_subtract(self, other_uncert, result_data, correlation): return super()._propagate_add_sub(other_uncert, result_data, correlation, subtract=True, to_variance=np.square, from_variance=np.sqrt) def _propagate_multiply(self, other_uncert, result_data, correlation): return super()._propagate_multiply_divide(other_uncert, result_data, correlation, divide=False, to_variance=np.square, from_variance=np.sqrt) def _propagate_divide(self, other_uncert, result_data, correlation): return super()._propagate_multiply_divide(other_uncert, result_data, correlation, divide=True, to_variance=np.square, from_variance=np.sqrt) def _data_unit_to_uncertainty_unit(self, value): return value def _convert_to_variance(self): new_array = None if self.array is None else self.array ** 2 new_unit = None if self.unit is None else self.unit ** 2 return VarianceUncertainty(new_array, unit=new_unit) @classmethod def _convert_from_variance(cls, var_uncert): new_array = None if var_uncert.array is None else var_uncert.array ** (1 / 2) new_unit = None if var_uncert.unit is None else var_uncert.unit ** (1 / 2) return cls(new_array, unit=new_unit) class VarianceUncertainty(_VariancePropagationMixin, NDUncertainty): """ Variance uncertainty assuming first order Gaussian error propagation. This class implements uncertainty propagation for ``addition``, ``subtraction``, ``multiplication`` and ``division`` with other instances of `VarianceUncertainty`. The class can handle if the uncertainty has a unit that differs from (but is convertible to) the parents `NDData` unit. The unit of the resulting uncertainty will be the square of the unit of the resulting data. 
    Correlation is also supported, but the correlation must be supplied as
    input; this class cannot determine the correlation itself.

    Parameters
    ----------
    args, kwargs : see `NDUncertainty`

    Examples
    --------
    Compare this example to that in `StdDevUncertainty`; the uncertainties
    in the examples below are equivalent to the uncertainties in
    `StdDevUncertainty`.

    `VarianceUncertainty` should always be associated with an `NDData`-like
    instance, either by creating it during initialization::

        >>> from astropy.nddata import NDData, VarianceUncertainty
        >>> ndd = NDData([1,2,3], unit='m',
        ...              uncertainty=VarianceUncertainty([0.01, 0.01, 0.01]))
        >>> ndd.uncertainty  # doctest: +FLOAT_CMP
        VarianceUncertainty([0.01, 0.01, 0.01])

    or by setting it manually on the `NDData` instance::

        >>> ndd.uncertainty = VarianceUncertainty([0.04], unit='m^2', copy=True)
        >>> ndd.uncertainty  # doctest: +FLOAT_CMP
        VarianceUncertainty([0.04])

    the uncertainty ``array`` can also be set directly::

        >>> ndd.uncertainty.array = 4
        >>> ndd.uncertainty
        VarianceUncertainty(4)

    .. note::
        The unit will not be displayed.
    """
    @property
    def uncertainty_type(self):
        """``"var"`` : `VarianceUncertainty` implements variance.
        """
        return 'var'

    @property
    def supports_correlated(self):
        """`True` : `VarianceUncertainty` allows propagation of correlated \
        uncertainties.

        ``correlation`` must be given, this class does not implement computing
        it by itself.
        """
        return True

    def _propagate_add(self, other_uncert, result_data, correlation):
        return super()._propagate_add_sub(other_uncert, result_data,
                                          correlation, subtract=False)

    def _propagate_subtract(self, other_uncert, result_data, correlation):
        return super()._propagate_add_sub(other_uncert, result_data,
                                          correlation, subtract=True)

    def _propagate_multiply(self, other_uncert, result_data, correlation):
        return super()._propagate_multiply_divide(other_uncert,
                                                  result_data, correlation,
                                                  divide=False)

    def _propagate_divide(self, other_uncert, result_data, correlation):
        return super()._propagate_multiply_divide(other_uncert,
                                                  result_data, correlation,
                                                  divide=True)

    def _data_unit_to_uncertainty_unit(self, value):
        return value ** 2

    def _convert_to_variance(self):
        return self

    @classmethod
    def _convert_from_variance(cls, var_uncert):
        return var_uncert


def _inverse(x):
    """Just a simple inverse for use in the InverseVariance"""
    return 1 / x


class InverseVariance(_VariancePropagationMixin, NDUncertainty):
    """
    Inverse variance uncertainty assuming first order Gaussian error
    propagation.

    This class implements uncertainty propagation for ``addition``,
    ``subtraction``, ``multiplication`` and ``division`` with other instances
    of `InverseVariance`. The class can handle if the uncertainty has a unit
    that differs from (but is convertible to) the parent's `NDData` unit. The
    unit of the resulting uncertainty will be the inverse square of the unit
    of the resulting data. Correlation is also supported, but the correlation
    must be supplied as input; this class cannot determine the correlation
    itself.

    Parameters
    ----------
    args, kwargs : see `NDUncertainty`

    Examples
    --------
    Compare this example to that in `StdDevUncertainty`; the uncertainties
    in the examples below are equivalent to the uncertainties in
    `StdDevUncertainty`.

    `InverseVariance` should always be associated with an `NDData`-like
    instance, either by creating it during initialization::

        >>> from astropy.nddata import NDData, InverseVariance
        >>> ndd = NDData([1,2,3], unit='m',
        ...              uncertainty=InverseVariance([100, 100, 100]))
        >>> ndd.uncertainty  # doctest: +FLOAT_CMP
        InverseVariance([100, 100, 100])

    or by setting it manually on the `NDData` instance::

        >>> ndd.uncertainty = InverseVariance([25], unit='1/m^2', copy=True)
        >>> ndd.uncertainty  # doctest: +FLOAT_CMP
        InverseVariance([25])

    the uncertainty ``array`` can also be set directly::

        >>> ndd.uncertainty.array = 0.25
        >>> ndd.uncertainty
        InverseVariance(0.25)

    .. note::
        The unit will not be displayed.
    """
    @property
    def uncertainty_type(self):
        """``"ivar"`` : `InverseVariance` implements inverse variance.
        """
        return 'ivar'

    @property
    def supports_correlated(self):
        """`True` : `InverseVariance` allows propagation of correlated \
        uncertainties.

        ``correlation`` must be given, this class does not implement computing
        it by itself.
        """
        return True

    def _propagate_add(self, other_uncert, result_data, correlation):
        return super()._propagate_add_sub(other_uncert, result_data,
                                          correlation, subtract=False,
                                          to_variance=_inverse,
                                          from_variance=_inverse)

    def _propagate_subtract(self, other_uncert, result_data, correlation):
        return super()._propagate_add_sub(other_uncert, result_data,
                                          correlation, subtract=True,
                                          to_variance=_inverse,
                                          from_variance=_inverse)

    def _propagate_multiply(self, other_uncert, result_data, correlation):
        return super()._propagate_multiply_divide(other_uncert,
                                                  result_data, correlation,
                                                  divide=False,
                                                  to_variance=_inverse,
                                                  from_variance=_inverse)

    def _propagate_divide(self, other_uncert, result_data, correlation):
        return super()._propagate_multiply_divide(other_uncert,
                                                  result_data, correlation,
                                                  divide=True,
                                                  to_variance=_inverse,
                                                  from_variance=_inverse)

    def _data_unit_to_uncertainty_unit(self, value):
        return 1 / value ** 2

    def _convert_to_variance(self):
        new_array = None if self.array is None else 1 / self.array
        new_unit = None if self.unit is None else 1 / self.unit
        return VarianceUncertainty(new_array, unit=new_unit)

    @classmethod
    def _convert_from_variance(cls, var_uncert):
        new_array = None if var_uncert.array is None else 1 / var_uncert.array
        new_unit = None if var_uncert.unit is None else 1 / var_uncert.unit
        return cls(new_array, unit=new_unit)
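
# A short, self-contained sketch of converting between the uncertainty
# classes defined above with ``represent_as``. The numbers follow the
# std/var/ivar relations: var = std**2 and ivar = 1 / var.
def _example_uncertainty_conversions():
    import numpy as np

    std = StdDevUncertainty(np.array([0.1, 0.2]))
    var = std.represent_as(VarianceUncertainty)  # array -> [0.01, 0.04]
    ivar = std.represent_as(InverseVariance)     # array -> [100.0, 25.0]
    return var.array, ivar.array
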
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module implements the base CCDData class."""

import itertools

import numpy as np

from .compat import NDDataArray
from .nduncertainty import (
    StdDevUncertainty, NDUncertainty, VarianceUncertainty, InverseVariance)
from astropy.io import fits, registry
from astropy import units as u
from astropy import log
from astropy.wcs import WCS
from astropy.utils.decorators import sharedmethod


__all__ = ['CCDData', 'fits_ccddata_reader', 'fits_ccddata_writer']

_known_uncertainties = (StdDevUncertainty, VarianceUncertainty, InverseVariance)
_unc_name_to_cls = {cls.__name__: cls for cls in _known_uncertainties}
_unc_cls_to_name = {cls: cls.__name__ for cls in _known_uncertainties}

# Global value which can turn on/off the unit requirements when creating a
# CCDData. Should be used with care because several functions actually break
# if the unit is None!
_config_ccd_requires_unit = True


def _arithmetic(op):
    """Decorator factory which temporarily disables the need for a unit when
    creating a new CCDData instance. The final result must have a unit.

    Parameters
    ----------
    op : function
        The function to apply. Supported are:

        - ``np.add``
        - ``np.subtract``
        - ``np.multiply``
        - ``np.true_divide``

    Notes
    -----
    Should only be used on CCDData ``add``, ``subtract``, ``divide`` or
    ``multiply`` because only these methods from NDArithmeticMixin are
    overwritten.
    """
    def decorator(func):
        def inner(self, operand, operand2=None, **kwargs):
            global _config_ccd_requires_unit
            _config_ccd_requires_unit = False
            result = self._prepare_then_do_arithmetic(op, operand,
                                                      operand2, **kwargs)
            # Wrap it again as CCDData so it checks the final unit.
            _config_ccd_requires_unit = True
            return result.__class__(result)
        inner.__doc__ = f"See `astropy.nddata.NDArithmeticMixin.{func.__name__}`."
        return sharedmethod(inner)
    return decorator


def _uncertainty_unit_equivalent_to_parent(uncertainty_type, unit, parent_unit):
    if uncertainty_type is StdDevUncertainty:
        return unit == parent_unit
    elif uncertainty_type is VarianceUncertainty:
        return unit == (parent_unit ** 2)
    elif uncertainty_type is InverseVariance:
        return unit == (1 / (parent_unit ** 2))
    raise ValueError(f"unsupported uncertainty type: {uncertainty_type}")


class CCDData(NDDataArray):
    """A class describing basic CCD data.

    The CCDData class is based on the NDData object and includes a data array,
    uncertainty frame, mask frame, flag frame, meta data, units, and WCS
    information for a single CCD image.

    Parameters
    ----------
    data : `~astropy.nddata.CCDData`-like or array-like
        The actual data contained in this `~astropy.nddata.CCDData` object.
        Note that the data will always be saved by *reference*, so you should
        make a copy of the ``data`` before passing it in if that's the desired
        behavior.

    uncertainty : `~astropy.nddata.StdDevUncertainty`, \
            `~astropy.nddata.VarianceUncertainty`, \
            `~astropy.nddata.InverseVariance`, `numpy.ndarray` or \
            None, optional
        Uncertainties on the data. If the uncertainty is a `numpy.ndarray`, it
        is assumed to be, and stored as, a `~astropy.nddata.StdDevUncertainty`.
        Default is ``None``.

    mask : `numpy.ndarray` or None, optional
        Mask for the data, given as a boolean Numpy array with a shape
        matching that of the data. The values must be `False` where
        the data is *valid* and `True` when it is not (like Numpy
        masked arrays). If ``data`` is a numpy masked array, providing
        ``mask`` here will cause the mask from the masked array to be
        ignored.
        Default is ``None``.
flags : `numpy.ndarray` or `~astropy.nddata.FlagCollection` or None, \ optional Flags giving information about each pixel. These can be specified either as a Numpy array of any type with a shape matching that of the data, or as a `~astropy.nddata.FlagCollection` instance which has a shape matching that of the data. Default is ``None``. wcs : `~astropy.wcs.WCS` or None, optional WCS-object containing the world coordinate system for the data. Default is ``None``. meta : dict-like object or None, optional Metadata for this object. "Metadata" here means all information that is included with this object but not part of any other attribute of this particular object, e.g. creation date, unique identifier, simulation parameters, exposure time, telescope name, etc. unit : `~astropy.units.Unit` or str, optional The units of the data. Default is ``None``. .. warning:: If the unit is ``None`` or not otherwise specified it will raise a ``ValueError`` Raises ------ ValueError If the ``uncertainty`` or ``mask`` inputs cannot be broadcast (e.g., match shape) onto ``data``. Methods ------- read(\\*args, \\**kwargs) ``Classmethod`` to create an CCDData instance based on a ``FITS`` file. This method uses :func:`fits_ccddata_reader` with the provided parameters. write(\\*args, \\**kwargs) Writes the contents of the CCDData instance into a new ``FITS`` file. This method uses :func:`fits_ccddata_writer` with the provided parameters. Attributes ---------- known_invalid_fits_unit_strings A dictionary that maps commonly-used fits unit name strings that are technically invalid to the correct valid unit type (or unit string). This is primarily for variant names like "ELECTRONS/S" which are not formally valid, but are unambiguous and frequently enough encountered that it is convenient to map them to the correct unit. Notes ----- `~astropy.nddata.CCDData` objects can be easily converted to a regular Numpy array using `numpy.asarray`. For example:: >>> from astropy.nddata import CCDData >>> import numpy as np >>> x = CCDData([1,2,3], unit='adu') >>> np.asarray(x) array([1, 2, 3]) This is useful, for example, when plotting a 2D image using matplotlib. >>> from astropy.nddata import CCDData >>> from matplotlib import pyplot as plt # doctest: +SKIP >>> x = CCDData([[1,2,3], [4,5,6]], unit='adu') >>> plt.imshow(x) # doctest: +SKIP """ def __init__(self, *args, **kwd): if 'meta' not in kwd: kwd['meta'] = kwd.pop('header', None) if 'header' in kwd: raise ValueError("can't have both header and meta.") super().__init__(*args, **kwd) if self._wcs is not None: llwcs = self._wcs.low_level_wcs if not isinstance(llwcs, WCS): raise TypeError("the wcs must be a WCS instance.") self._wcs = llwcs # Check if a unit is set. This can be temporarily disabled by the # _CCDDataUnit contextmanager. if _config_ccd_requires_unit and self.unit is None: raise ValueError("a unit for CCDData must be specified.") def _slice_wcs(self, item): """ Override the WCS slicing behaviour so that the wcs attribute continues to be an `astropy.wcs.WCS`. 
""" if self.wcs is None: return None try: return self.wcs[item] except Exception as err: self._handle_wcs_slicing_error(err, item) @property def data(self): return self._data @data.setter def data(self, value): self._data = value @property def wcs(self): return self._wcs @wcs.setter def wcs(self, value): if value is not None and not isinstance(value, WCS): raise TypeError("the wcs must be a WCS instance.") self._wcs = value @property def unit(self): return self._unit @unit.setter def unit(self, value): self._unit = u.Unit(value) @property def header(self): return self._meta @header.setter def header(self, value): self.meta = value @property def uncertainty(self): return self._uncertainty @uncertainty.setter def uncertainty(self, value): if value is not None: if isinstance(value, NDUncertainty): if getattr(value, '_parent_nddata', None) is not None: value = value.__class__(value, copy=False) self._uncertainty = value elif isinstance(value, np.ndarray): if value.shape != self.shape: raise ValueError("uncertainty must have same shape as " "data.") self._uncertainty = StdDevUncertainty(value) log.info("array provided for uncertainty; assuming it is a " "StdDevUncertainty.") else: raise TypeError("uncertainty must be an instance of a " "NDUncertainty object or a numpy array.") self._uncertainty.parent_nddata = self else: self._uncertainty = value def to_hdu(self, hdu_mask='MASK', hdu_uncertainty='UNCERT', hdu_flags=None, wcs_relax=True, key_uncertainty_type='UTYPE', as_image_hdu=False): """Creates an HDUList object from a CCDData object. Parameters ---------- hdu_mask, hdu_uncertainty, hdu_flags : str or None, optional If it is a string append this attribute to the HDUList as `~astropy.io.fits.ImageHDU` with the string as extension name. Flags are not supported at this time. If ``None`` this attribute is not appended. Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty and ``None`` for flags. wcs_relax : bool Value of the ``relax`` parameter to use in converting the WCS to a FITS header using `~astropy.wcs.WCS.to_header`. The common ``CTYPE`` ``RA---TAN-SIP`` and ``DEC--TAN-SIP`` requires ``relax=True`` for the ``-SIP`` part of the ``CTYPE`` to be preserved. key_uncertainty_type : str, optional The header key name for the class name of the uncertainty (if any) that is used to store the uncertainty type in the uncertainty hdu. Default is ``UTYPE``. .. versionadded:: 3.1 as_image_hdu : bool If this option is `True`, the first item of the returned `~astropy.io.fits.HDUList` is a `~astropy.io.fits.ImageHDU`, instead of the default `~astropy.io.fits.PrimaryHDU`. Raises ------ ValueError - If ``self.mask`` is set but not a `numpy.ndarray`. - If ``self.uncertainty`` is set but not a astropy uncertainty type. - If ``self.uncertainty`` is set but has another unit then ``self.data``. NotImplementedError Saving flags is not supported. Returns ------- hdulist : `~astropy.io.fits.HDUList` """ if isinstance(self.header, fits.Header): # Copy here so that we can modify the HDU header by adding WCS # information without changing the header of the CCDData object. header = self.header.copy() else: # Because _insert_in_metadata_fits_safe is written as a method # we need to create a dummy CCDData instance to hold the FITS # header we are constructing. This probably indicates that # _insert_in_metadata_fits_safe should be rewritten in a more # sensible way... 
dummy_ccd = CCDData([1], meta=fits.Header(), unit="adu") for k, v in self.header.items(): dummy_ccd._insert_in_metadata_fits_safe(k, v) header = dummy_ccd.header if self.unit is not u.dimensionless_unscaled: header['bunit'] = self.unit.to_string() if self.wcs: # Simply extending the FITS header with the WCS can lead to # duplicates of the WCS keywords; iterating over the WCS # header should be safer. # # Turns out if I had read the io.fits.Header.extend docs more # carefully, I would have realized that the keywords exist to # avoid duplicates and preserve, as much as possible, the # structure of the commentary cards. # # Note that until astropy/astropy#3967 is closed, the extend # will fail if there are comment cards in the WCS header but # not header. wcs_header = self.wcs.to_header(relax=wcs_relax) header.extend(wcs_header, useblanks=False, update=True) if as_image_hdu: hdus = [fits.ImageHDU(self.data, header)] else: hdus = [fits.PrimaryHDU(self.data, header)] if hdu_mask and self.mask is not None: # Always assuming that the mask is a np.ndarray (check that it has # a 'shape'). if not hasattr(self.mask, 'shape'): raise ValueError('only a numpy.ndarray mask can be saved.') # Convert boolean mask to uint since io.fits cannot handle bool. hduMask = fits.ImageHDU(self.mask.astype(np.uint8), name=hdu_mask) hdus.append(hduMask) if hdu_uncertainty and self.uncertainty is not None: # We need to save some kind of information which uncertainty was # used so that loading the HDUList can infer the uncertainty type. # No idea how this can be done so only allow StdDevUncertainty. uncertainty_cls = self.uncertainty.__class__ if uncertainty_cls not in _known_uncertainties: raise ValueError('only uncertainties of type {} can be saved.' .format(_known_uncertainties)) uncertainty_name = _unc_cls_to_name[uncertainty_cls] hdr_uncertainty = fits.Header() hdr_uncertainty[key_uncertainty_type] = uncertainty_name # Assuming uncertainty is an StdDevUncertainty save just the array # this might be problematic if the Uncertainty has a unit differing # from the data so abort for different units. This is important for # astropy > 1.2 if (hasattr(self.uncertainty, 'unit') and self.uncertainty.unit is not None): if not _uncertainty_unit_equivalent_to_parent( uncertainty_cls, self.uncertainty.unit, self.unit): raise ValueError( 'saving uncertainties with a unit that is not ' 'equivalent to the unit from the data unit is not ' 'supported.') hduUncert = fits.ImageHDU(self.uncertainty.array, hdr_uncertainty, name=hdu_uncertainty) hdus.append(hduUncert) if hdu_flags and self.flags: raise NotImplementedError('adding the flags to a HDU is not ' 'supported at this time.') hdulist = fits.HDUList(hdus) return hdulist def copy(self): """ Return a copy of the CCDData object. """ return self.__class__(self, copy=True) add = _arithmetic(np.add)(NDDataArray.add) subtract = _arithmetic(np.subtract)(NDDataArray.subtract) multiply = _arithmetic(np.multiply)(NDDataArray.multiply) divide = _arithmetic(np.true_divide)(NDDataArray.divide) def _insert_in_metadata_fits_safe(self, key, value): """ Insert key/value pair into metadata in a way that FITS can serialize. Parameters ---------- key : str Key to be inserted in dictionary. value : str or None Value to be inserted. Notes ----- This addresses a shortcoming of the FITS standard. There are length restrictions on both the ``key`` (8 characters) and ``value`` (72 characters) in the FITS standard. 
There is a convention for handling long keywords and a convention for handling long values, but the two conventions cannot be used at the same time. This addresses that case by checking the length of the ``key`` and ``value`` and, if necessary, shortening the key. """ if len(key) > 8 and len(value) > 72: short_name = key[:8] self.meta[f'HIERARCH {key.upper()}'] = ( short_name, f"Shortened name for {key}") self.meta[short_name] = value else: self.meta[key] = value # A dictionary mapping "known" invalid fits unit known_invalid_fits_unit_strings = {'ELECTRONS/S': u.electron/u.s, 'ELECTRONS': u.electron, 'electrons': u.electron} # These need to be importable by the tests... _KEEP_THESE_KEYWORDS_IN_HEADER = [ 'JD-OBS', 'MJD-OBS', 'DATE-OBS' ] _PCs = set(['PC1_1', 'PC1_2', 'PC2_1', 'PC2_2']) _CDs = set(['CD1_1', 'CD1_2', 'CD2_1', 'CD2_2']) def _generate_wcs_and_update_header(hdr): """ Generate a WCS object from a header and remove the WCS-specific keywords from the header. Parameters ---------- hdr : astropy.io.fits.header or other dict-like Returns ------- new_header, wcs """ # Try constructing a WCS object. try: wcs = WCS(hdr) except Exception as exc: # Normally WCS only raises Warnings and doesn't fail but in rare # cases (malformed header) it could fail... log.info('An exception happened while extracting WCS information from ' 'the Header.\n{}: {}'.format(type(exc).__name__, str(exc))) return hdr, None # Test for success by checking to see if the wcs ctype has a non-empty # value, return None for wcs if ctype is empty. if not wcs.wcs.ctype[0]: return (hdr, None) new_hdr = hdr.copy() # If the keywords below are in the header they are also added to WCS. # It seems like they should *not* be removed from the header, though. wcs_header = wcs.to_header(relax=True) for k in wcs_header: if k not in _KEEP_THESE_KEYWORDS_IN_HEADER: new_hdr.remove(k, ignore_missing=True) # Check that this does not result in an inconsistent header WCS if the WCS # is converted back to a header. if (_PCs & set(wcs_header)) and (_CDs & set(new_hdr)): # The PCi_j representation is used by the astropy.wcs object, # so CDi_j keywords were not removed from new_hdr. Remove them now. for cd in _CDs: new_hdr.remove(cd, ignore_missing=True) # The other case -- CD in the header produced by astropy.wcs -- should # never happen based on [1], which computes the matrix in PC form. # [1]: https://github.com/astropy/astropy/blob/1cf277926d3598dd672dd528504767c37531e8c9/cextern/wcslib/C/wcshdr.c#L596 # # The test test_ccddata.test_wcs_keyword_removal_for_wcs_test_files() does # check for the possibility that both PC and CD are present in the result # so if the implementation of to_header changes in wcslib in the future # then the tests should catch it, and then this code will need to be # updated. # We need to check for any SIP coefficients that got left behind if the # header has SIP. if wcs.sip is not None: keyword = '{}_{}_{}' polynomials = ['A', 'B', 'AP', 'BP'] for poly in polynomials: order = wcs.sip.__getattribute__(f'{poly.lower()}_order') for i, j in itertools.product(range(order), repeat=2): new_hdr.remove(keyword.format(poly, i, j), ignore_missing=True) return (new_hdr, wcs) def fits_ccddata_reader(filename, hdu=0, unit=None, hdu_uncertainty='UNCERT', hdu_mask='MASK', hdu_flags=None, key_uncertainty_type='UTYPE', **kwd): """ Generate a CCDData object from a FITS file. Parameters ---------- filename : str Name of fits file. 
hdu : int, str, tuple of (str, int), optional Index or other identifier of the Header Data Unit of the FITS file from which CCDData should be initialized. If zero and no data in the primary HDU, it will search for the first extension HDU with data. The header will be added to the primary HDU. Default is ``0``. unit : `~astropy.units.Unit`, optional Units of the image data. If this argument is provided and there is a unit for the image in the FITS header (the keyword ``BUNIT`` is used as the unit, if present), this argument is used for the unit. Default is ``None``. hdu_uncertainty : str or None, optional FITS extension from which the uncertainty should be initialized. If the extension does not exist the uncertainty of the CCDData is ``None``. Default is ``'UNCERT'``. hdu_mask : str or None, optional FITS extension from which the mask should be initialized. If the extension does not exist the mask of the CCDData is ``None``. Default is ``'MASK'``. hdu_flags : str or None, optional Currently not implemented. Default is ``None``. key_uncertainty_type : str, optional The header key name where the class name of the uncertainty is stored in the hdu of the uncertainty (if any). Default is ``UTYPE``. .. versionadded:: 3.1 kwd : Any additional keyword parameters are passed through to the FITS reader in :mod:`astropy.io.fits`; see Notes for additional discussion. Notes ----- FITS files that contained scaled data (e.g. unsigned integer images) will be scaled and the keywords used to manage scaled data in :mod:`astropy.io.fits` are disabled. """ unsupport_open_keywords = { 'do_not_scale_image_data': 'Image data must be scaled.', 'scale_back': 'Scale information is not preserved.' } for key, msg in unsupport_open_keywords.items(): if key in kwd: prefix = f'unsupported keyword: {key}.' raise TypeError(' '.join([prefix, msg])) with fits.open(filename, **kwd) as hdus: hdr = hdus[hdu].header if hdu_uncertainty is not None and hdu_uncertainty in hdus: unc_hdu = hdus[hdu_uncertainty] stored_unc_name = unc_hdu.header.get(key_uncertainty_type, 'None') # For compatibility reasons the default is standard deviation # uncertainty because files could have been created before the # uncertainty type was stored in the header. unc_type = _unc_name_to_cls.get(stored_unc_name, StdDevUncertainty) uncertainty = unc_type(unc_hdu.data) else: uncertainty = None if hdu_mask is not None and hdu_mask in hdus: # Mask is saved as uint but we want it to be boolean. mask = hdus[hdu_mask].data.astype(np.bool_) else: mask = None if hdu_flags is not None and hdu_flags in hdus: raise NotImplementedError('loading flags is currently not ' 'supported.') # search for the first instance with data if # the primary header is empty. if hdu == 0 and hdus[hdu].data is None: for i in range(len(hdus)): if (hdus.info(hdu)[i][3] == 'ImageHDU' and hdus.fileinfo(i)['datSpan'] > 0): hdu = i comb_hdr = hdus[hdu].header.copy() # Add header values from the primary header that aren't # present in the extension header. comb_hdr.extend(hdr, unique=True) hdr = comb_hdr log.info(f"first HDU with data is extension {hdu}.") break if 'bunit' in hdr: fits_unit_string = hdr['bunit'] # patch to handle FITS files using ADU for the unit instead of the # standard version of 'adu' if fits_unit_string.strip().lower() == 'adu': fits_unit_string = fits_unit_string.lower() else: fits_unit_string = None if fits_unit_string: if unit is None: # Convert the BUNIT header keyword to a unit and if that's not # possible raise a meaningful error message. 
try: kifus = CCDData.known_invalid_fits_unit_strings if fits_unit_string in kifus: fits_unit_string = kifus[fits_unit_string] fits_unit_string = u.Unit(fits_unit_string) except ValueError: raise ValueError( 'The Header value for the key BUNIT ({}) cannot be ' 'interpreted as valid unit. To successfully read the ' 'file as CCDData you can pass in a valid `unit` ' 'argument explicitly or change the header of the FITS ' 'file before reading it.' .format(fits_unit_string)) else: log.info("using the unit {} passed to the FITS reader instead " "of the unit {} in the FITS file." .format(unit, fits_unit_string)) use_unit = unit or fits_unit_string hdr, wcs = _generate_wcs_and_update_header(hdr) ccd_data = CCDData(hdus[hdu].data, meta=hdr, unit=use_unit, mask=mask, uncertainty=uncertainty, wcs=wcs) return ccd_data def fits_ccddata_writer( ccd_data, filename, hdu_mask='MASK', hdu_uncertainty='UNCERT', hdu_flags=None, key_uncertainty_type='UTYPE', as_image_hdu=False, **kwd): """ Write CCDData object to FITS file. Parameters ---------- filename : str Name of file. hdu_mask, hdu_uncertainty, hdu_flags : str or None, optional If it is a string append this attribute to the HDUList as `~astropy.io.fits.ImageHDU` with the string as extension name. Flags are not supported at this time. If ``None`` this attribute is not appended. Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty and ``None`` for flags. key_uncertainty_type : str, optional The header key name for the class name of the uncertainty (if any) that is used to store the uncertainty type in the uncertainty hdu. Default is ``UTYPE``. .. versionadded:: 3.1 as_image_hdu : bool If this option is `True`, the first item of the returned `~astropy.io.fits.HDUList` is a `~astropy.io.fits.ImageHDU`, instead of the default `~astropy.io.fits.PrimaryHDU`. kwd : All additional keywords are passed to :py:mod:`astropy.io.fits` Raises ------ ValueError - If ``self.mask`` is set but not a `numpy.ndarray`. - If ``self.uncertainty`` is set but not a `~astropy.nddata.StdDevUncertainty`. - If ``self.uncertainty`` is set but has another unit then ``self.data``. NotImplementedError Saving flags is not supported. """ hdu = ccd_data.to_hdu( hdu_mask=hdu_mask, hdu_uncertainty=hdu_uncertainty, key_uncertainty_type=key_uncertainty_type, hdu_flags=hdu_flags, as_image_hdu=as_image_hdu) if as_image_hdu: hdu.insert(0, fits.PrimaryHDU()) hdu.writeto(filename, **kwd) with registry.delay_doc_updates(CCDData): registry.register_reader('fits', CCDData, fits_ccddata_reader) registry.register_writer('fits', CCDData, fits_ccddata_writer) registry.register_identifier('fits', CCDData, fits.connect.is_fits)
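
# A minimal sketch of the registered reader/writer above in action through
# the unified I/O interface. The file name is illustrative only.
def _example_ccddata_roundtrip(filename='ccd_example.fits'):
    ccd = CCDData(np.ones((4, 4)), unit='adu',
                  uncertainty=StdDevUncertainty(np.full((4, 4), 0.1)))
    ccd.write(filename, overwrite=True)  # dispatches to fits_ccddata_writer
    return CCDData.read(filename)        # dispatches to fits_ccddata_reader
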
# Licensed under a 3-clause BSD style license - see LICENSE.rst import io import os from os.path import join import os.path import shutil import sys from collections import defaultdict from setuptools import Extension from setuptools.dep_util import newer_group import numpy from extension_helpers import import_file, write_if_different, get_compiler, pkg_config WCSROOT = os.path.relpath(os.path.dirname(__file__)) WCSVERSION = "7.11" def b(s): return s.encode('ascii') def string_escape(s): s = s.decode('ascii').encode('ascii', 'backslashreplace') s = s.replace(b'\n', b'\\n') s = s.replace(b'\0', b'\\0') return s.decode('ascii') def determine_64_bit_int(): """ The only configuration parameter needed at compile-time is how to specify a 64-bit signed integer. Python's ctypes module can get us that information. If we can't be absolutely certain, we default to "long long int", which is correct on most platforms (x86, x86_64). If we find platforms where this heuristic doesn't work, we may need to hardcode for them. """ try: try: import ctypes except ImportError: raise ValueError() if ctypes.sizeof(ctypes.c_longlong) == 8: return "long long int" elif ctypes.sizeof(ctypes.c_long) == 8: return "long int" elif ctypes.sizeof(ctypes.c_int) == 8: return "int" else: raise ValueError() except ValueError: return "long long int" def write_wcsconfig_h(paths): """ Writes out the wcsconfig.h header with local configuration. """ h_file = io.StringIO() h_file.write(""" /* The bundled version has WCSLIB_VERSION */ #define HAVE_WCSLIB_VERSION 1 /* WCSLIB library version number. */ #define WCSLIB_VERSION {} /* 64-bit integer data type. */ #define WCSLIB_INT64 {} /* Windows needs some other defines to prevent inclusion of wcsset() which conflicts with wcslib's wcsset(). These need to be set on code that *uses* astropy.wcs, in addition to astropy.wcs itself. */ #if defined(_WIN32) || defined(_MSC_VER) || defined(__MINGW32__) || defined (__MINGW64__) #ifndef YY_NO_UNISTD_H #define YY_NO_UNISTD_H #endif #ifndef _CRT_SECURE_NO_WARNINGS #define _CRT_SECURE_NO_WARNINGS #endif #ifndef _NO_OLDNAMES #define _NO_OLDNAMES #endif #ifndef NO_OLDNAMES #define NO_OLDNAMES #endif #ifndef __STDC__ #define __STDC__ 1 #endif #endif """.format(WCSVERSION, determine_64_bit_int())) content = h_file.getvalue().encode('ascii') for path in paths: write_if_different(path, content) ###################################################################### # GENERATE DOCSTRINGS IN C def generate_c_docstrings(): docstrings = import_file(os.path.join(WCSROOT, 'docstrings.py')) docstrings = docstrings.__dict__ keys = [ key for key, val in docstrings.items() if not key.startswith('__') and isinstance(val, str)] keys.sort() docs = {} for key in keys: docs[key] = docstrings[key].encode('utf8').lstrip() + b'\0' h_file = io.StringIO() h_file.write("""/* DO NOT EDIT! This file is autogenerated by astropy/wcs/setup_package.py. To edit its contents, edit astropy/wcs/docstrings.py */ #ifndef __DOCSTRINGS_H__ #define __DOCSTRINGS_H__ """) for key in keys: val = docs[key] h_file.write(f'extern char doc_{key}[{len(val)}];\n') h_file.write("\n#endif\n\n") write_if_different( join(WCSROOT, 'include', 'astropy_wcs', 'docstrings.h'), h_file.getvalue().encode('utf-8')) c_file = io.StringIO() c_file.write("""/* DO NOT EDIT! This file is autogenerated by astropy/wcs/setup_package.py. To edit its contents, edit astropy/wcs/docstrings.py The weirdness here with strncpy is because some C compilers, notably MSVC, do not support string literals greater than 256 characters. 
*/ #include <string.h> #include "astropy_wcs/docstrings.h" """) for key in keys: val = docs[key] c_file.write(f'char doc_{key}[{len(val)}] = {{\n') for i in range(0, len(val), 12): section = val[i:i+12] c_file.write(' ') c_file.write(''.join(f'0x{x:02x}, ' for x in section)) c_file.write('\n') c_file.write(" };\n\n") write_if_different( join(WCSROOT, 'src', 'docstrings.c'), c_file.getvalue().encode('utf-8')) def get_wcslib_cfg(cfg, wcslib_files, include_paths): debug = '--debug' in sys.argv cfg['include_dirs'].append(numpy.get_include()) cfg['define_macros'].extend([ ('ECHO', None), ('WCSTRIG_MACRO', None), ('ASTROPY_WCS_BUILD', None), ('_GNU_SOURCE', None)]) if ((int(os.environ.get('ASTROPY_USE_SYSTEM_WCSLIB', 0)) or int(os.environ.get('ASTROPY_USE_SYSTEM_ALL', 0))) and not sys.platform == 'win32'): wcsconfig_h_path = join(WCSROOT, 'include', 'wcsconfig.h') if os.path.exists(wcsconfig_h_path): os.unlink(wcsconfig_h_path) for k, v in pkg_config(['wcslib'], ['wcs']).items(): cfg[k].extend(v) else: write_wcsconfig_h(include_paths) wcslib_path = join("cextern", "wcslib") # Path to wcslib wcslib_cpath = join(wcslib_path, "C") # Path to wcslib source files cfg['sources'].extend(join(wcslib_cpath, x) for x in wcslib_files) cfg['include_dirs'].append(wcslib_cpath) if debug: cfg['define_macros'].append(('DEBUG', None)) cfg['undef_macros'].append('NDEBUG') if (not sys.platform.startswith('sun') and not sys.platform == 'win32'): cfg['extra_compile_args'].extend(["-fno-inline", "-O0", "-g"]) else: # Define ECHO as nothing to prevent spurious newlines from # printing within the libwcs parser cfg['define_macros'].append(('NDEBUG', None)) cfg['undef_macros'].append('DEBUG') if sys.platform == 'win32': # These are written into wcsconfig.h, but that file is not # used by all parts of wcslib. 
cfg['define_macros'].extend([ ('YY_NO_UNISTD_H', None), ('_CRT_SECURE_NO_WARNINGS', None), ('_NO_OLDNAMES', None), # for mingw32 ('NO_OLDNAMES', None), # for mingw64 ('__STDC__', None) # for MSVC ]) if sys.platform.startswith('linux'): cfg['define_macros'].append(('HAVE_SINCOS', None)) # For 4.7+ enable C99 syntax in older compilers (need 'gnu99' std for gcc) if get_compiler() == 'unix': cfg['extra_compile_args'].extend(['-std=gnu99']) # Squelch a few compilation warnings in WCSLIB if get_compiler() in ('unix', 'mingw32'): if not debug: cfg['extra_compile_args'].extend([ '-Wno-strict-prototypes', '-Wno-unused-function', '-Wno-unused-value', '-Wno-uninitialized']) def get_extensions(): generate_c_docstrings() ###################################################################### # DISTUTILS SETUP cfg = defaultdict(list) wcslib_files = [ # List of wcslib files to compile 'flexed/wcsbth.c', 'flexed/wcspih.c', 'flexed/wcsulex.c', 'flexed/wcsutrn.c', 'cel.c', 'dis.c', 'lin.c', 'log.c', 'prj.c', 'spc.c', 'sph.c', 'spx.c', 'tab.c', 'wcs.c', 'wcserr.c', 'wcsfix.c', 'wcshdr.c', 'wcsprintf.c', 'wcsunits.c', 'wcsutil.c' ] wcslib_config_paths = [ join(WCSROOT, 'include', 'astropy_wcs', 'wcsconfig.h'), join(WCSROOT, 'include', 'wcsconfig.h') ] get_wcslib_cfg(cfg, wcslib_files, wcslib_config_paths) cfg['include_dirs'].append(join(WCSROOT, "include")) astropy_wcs_files = [ # List of astropy.wcs files to compile 'distortion.c', 'distortion_wrap.c', 'docstrings.c', 'pipeline.c', 'pyutil.c', 'astropy_wcs.c', 'astropy_wcs_api.c', 'sip.c', 'sip_wrap.c', 'str_list_proxy.c', 'unit_list_proxy.c', 'util.c', 'wcslib_wrap.c', 'wcslib_auxprm_wrap.c', 'wcslib_prjprm_wrap.c', 'wcslib_celprm_wrap.c', 'wcslib_tabprm_wrap.c', 'wcslib_wtbarr_wrap.c' ] cfg['sources'].extend(join(WCSROOT, 'src', x) for x in astropy_wcs_files) cfg['sources'] = [str(x) for x in cfg['sources']] cfg = dict((str(key), val) for key, val in cfg.items()) # Copy over header files from WCSLIB into the installed version of Astropy # so that other Python packages can write extensions that link to it. We # do the copying here then include the data in [options.package_data] in # the setup.cfg file wcslib_headers = [ 'cel.h', 'lin.h', 'prj.h', 'spc.h', 'spx.h', 'tab.h', 'wcs.h', 'wcserr.h', 'wcsmath.h', 'wcsprintf.h', ] if not (int(os.environ.get('ASTROPY_USE_SYSTEM_WCSLIB', 0)) or int(os.environ.get('ASTROPY_USE_SYSTEM_ALL', 0))): for header in wcslib_headers: source = join('cextern', 'wcslib', 'C', header) dest = join('astropy', 'wcs', 'include', 'wcslib', header) if newer_group([source], dest, 'newer'): shutil.copy(source, dest) return [Extension('astropy.wcs._wcs', **cfg)]
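
# A standalone sketch of the ctypes probe in ``determine_64_bit_int`` above:
# inspect candidate C integer types and report the first one that is 8 bytes
# wide, falling back to the same default the function uses.
def _example_probe_int64():
    import ctypes

    for ctype, name in ((ctypes.c_longlong, 'long long int'),
                        (ctypes.c_long, 'long int'),
                        (ctypes.c_int, 'int')):
        if ctypes.sizeof(ctype) == 8:
            return name
    return 'long long int'  # conservative default, as in the function above
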
# Licensed under a 3-clause BSD style license - see LICENSE.rst # Under the hood, there are 3 separate classes that perform different # parts of the transformation: # # - `~astropy.wcs.Wcsprm`: Is a direct wrapper of the core WCS # functionality in `wcslib`_. (This includes TPV and TPD # polynomial distortion, but not SIP distortion). # # - `~astropy.wcs.Sip`: Handles polynomial distortion as defined in the # `SIP`_ convention. # # - `~astropy.wcs.DistortionLookupTable`: Handles `distortion paper`_ # lookup tables. # # Additionally, the class `WCS` aggregates all of these transformations # together in a pipeline: # # - Detector to image plane correction (by a pair of # `~astropy.wcs.DistortionLookupTable` objects). # # - `SIP`_ distortion correction (by an underlying `~astropy.wcs.Sip` # object) # # - `distortion paper`_ table-lookup correction (by a pair of # `~astropy.wcs.DistortionLookupTable` objects). # # - `wcslib`_ WCS transformation (by a `~astropy.wcs.Wcsprm` object) # STDLIB import copy import uuid import io import itertools import os import re import textwrap import warnings import builtins # THIRD- from packaging.version import Version import numpy as np # LOCAL from astropy import log from astropy.io import fits from . import docstrings from . import _wcs from astropy import units as u from astropy.utils.compat import possible_filename from astropy.utils.exceptions import AstropyWarning, AstropyUserWarning, AstropyDeprecationWarning from astropy.utils.decorators import deprecated_renamed_argument # Mix-in class that provides the APE 14 API from .wcsapi.fitswcs import FITSWCSAPIMixin, SlicedFITSWCS __all__ = ['FITSFixedWarning', 'WCS', 'find_all_wcs', 'DistortionLookupTable', 'Sip', 'Tabprm', 'Wcsprm', 'Auxprm', 'Celprm', 'Prjprm', 'Wtbarr', 'WCSBase', 'validate', 'WcsError', 'SingularMatrixError', 'InconsistentAxisTypesError', 'InvalidTransformError', 'InvalidCoordinateError', 'InvalidPrjParametersError', 'NoSolutionError', 'InvalidSubimageSpecificationError', 'NoConvergence', 'NonseparableSubimageCoordinateSystemError', 'NoWcsKeywordsFoundError', 'InvalidTabularParametersError'] __doctest_skip__ = ['WCS.all_world2pix'] if _wcs is not None: if Version(_wcs.__version__) < Version("5.8"): raise ImportError( "astropy.wcs is built with wcslib {0}, but only versions 5.8 and " "later on the 5.x series are known to work. 
The version of wcslib "
            "that ships with astropy may be used.".format(_wcs.__version__))

    if not _wcs._sanity_check():
        raise RuntimeError(
            "astropy.wcs did not pass its sanity check for your build "
            "on your platform.")

    _WCSSUB_TIME_SUPPORT = Version(_wcs.__version__) >= Version("7.8")
    _WCS_TPD_WARN_LT71 = Version(_wcs.__version__) < Version("7.1")
    _WCS_TPD_WARN_LT74 = Version(_wcs.__version__) < Version("7.4")

    WCSBase = _wcs._Wcs
    DistortionLookupTable = _wcs.DistortionLookupTable
    Sip = _wcs.Sip
    Wcsprm = _wcs.Wcsprm
    Auxprm = _wcs.Auxprm
    Celprm = _wcs.Celprm
    Prjprm = _wcs.Prjprm
    Tabprm = _wcs.Tabprm
    Wtbarr = _wcs.Wtbarr
    WcsError = _wcs.WcsError
    SingularMatrixError = _wcs.SingularMatrixError
    InconsistentAxisTypesError = _wcs.InconsistentAxisTypesError
    InvalidTransformError = _wcs.InvalidTransformError
    InvalidCoordinateError = _wcs.InvalidCoordinateError
    NoSolutionError = _wcs.NoSolutionError
    InvalidSubimageSpecificationError = _wcs.InvalidSubimageSpecificationError
    NonseparableSubimageCoordinateSystemError = _wcs.NonseparableSubimageCoordinateSystemError
    NoWcsKeywordsFoundError = _wcs.NoWcsKeywordsFoundError
    InvalidTabularParametersError = _wcs.InvalidTabularParametersError
    InvalidPrjParametersError = _wcs.InvalidPrjParametersError

    # Copy all the constants from the C extension into this module's namespace
    for key, val in _wcs.__dict__.items():
        if key.startswith(('WCSSUB_', 'WCSHDR_', 'WCSHDO_', 'WCSCOMPARE_',
                           'PRJ_')):
            locals()[key] = val
            __all__.append(key)

    # Set coordinate extraction callback for WCS -TAB:
    def _load_tab_bintable(hdulist, extnam, extver, extlev, kind, ttype, row,
                           ndim):
        arr = hdulist[(extnam, extver)].data[ttype][row - 1]

        if arr.ndim != ndim:
            if kind == 'c' and ndim == 2:
                arr = arr.reshape((arr.size, 1))
            else:
                raise ValueError("Bad TDIM")

        return np.ascontiguousarray(arr, dtype=np.double)

    _wcs.set_wtbarr_fitsio_callback(_load_tab_bintable)

else:
    WCSBase = object
    Wcsprm = object
    DistortionLookupTable = object
    Sip = object
    Tabprm = object
    Wtbarr = object
    WcsError = None
    SingularMatrixError = None
    InconsistentAxisTypesError = None
    InvalidTransformError = None
    InvalidCoordinateError = None
    NoSolutionError = None
    InvalidSubimageSpecificationError = None
    NonseparableSubimageCoordinateSystemError = None
    NoWcsKeywordsFoundError = None
    InvalidTabularParametersError = None

    _WCSSUB_TIME_SUPPORT = False
    _WCS_TPD_WARN_LT71 = False
    _WCS_TPD_WARN_LT74 = False


# Additional relax bit flags
WCSHDO_SIP = 0x80000

# Regular expression defining SIP keywords. It matches a keyword that starts
# with A or B, optionally followed by P, followed by an underscore then a
# number in the range 0-19, followed by an underscore and another number in
# the range 0-19. The keyword optionally ends with a capital letter.
SIP_KW = re.compile('''^[AB]P?_1?[0-9]_1?[0-9][A-Z]?$''')


def _parse_keysel(keysel):
    keysel_flags = 0
    if keysel is not None:
        for element in keysel:
            if element.lower() == 'image':
                keysel_flags |= _wcs.WCSHDR_IMGHEAD
            elif element.lower() == 'binary':
                keysel_flags |= _wcs.WCSHDR_BIMGARR
            elif element.lower() == 'pixel':
                keysel_flags |= _wcs.WCSHDR_PIXLIST
            else:
                raise ValueError(
                    "keysel must be a list of 'image', 'binary' " +
                    "and/or 'pixel'")
    else:
        keysel_flags = -1

    return keysel_flags


class NoConvergence(Exception):
    """
    An error class used to report non-convergence and/or divergence
    of numerical methods. It is used to report errors in the
    iterative solution used by
    the :py:meth:`~astropy.wcs.WCS.all_world2pix`.

    Attributes
    ----------

    best_solution : `numpy.ndarray`
        Best solution achieved by the numerical method.
accuracy : `numpy.ndarray` Accuracy of the ``best_solution``. niter : `int` Number of iterations performed by the numerical method to compute ``best_solution``. divergent : None, `numpy.ndarray` Indices of the points in ``best_solution`` array for which the solution appears to be divergent. If the solution does not diverge, ``divergent`` will be set to `None`. slow_conv : None, `numpy.ndarray` Indices of the solutions in ``best_solution`` array for which the solution failed to converge within the specified maximum number of iterations. If there are no non-converging solutions (i.e., if the required accuracy has been achieved for all input data points) then ``slow_conv`` will be set to `None`. """ def __init__(self, *args, best_solution=None, accuracy=None, niter=None, divergent=None, slow_conv=None, **kwargs): super().__init__(*args) self.best_solution = best_solution self.accuracy = accuracy self.niter = niter self.divergent = divergent self.slow_conv = slow_conv if kwargs: warnings.warn("Function received unexpected arguments ({}) these " "are ignored but will raise an Exception in the " "future.".format(list(kwargs)), AstropyDeprecationWarning) class FITSFixedWarning(AstropyWarning): """ The warning raised when the contents of the FITS header have been modified to be standards compliant. """ pass class WCS(FITSWCSAPIMixin, WCSBase): """WCS objects perform standard WCS transformations, and correct for `SIP`_ and `distortion paper`_ table-lookup transformations, based on the WCS keywords and supplementary data read from a FITS file. See also: https://docs.astropy.org/en/stable/wcs/ Parameters ---------- header : `~astropy.io.fits.Header`, `~astropy.io.fits.hdu.image.PrimaryHDU`, `~astropy.io.fits.hdu.image.ImageHDU`, str, dict-like, or None, optional If *header* is not provided or None, the object will be initialized to default values. fobj : `~astropy.io.fits.HDUList`, optional It is needed when header keywords point to a `distortion paper`_ lookup table stored in a different extension. key : str, optional The name of a particular WCS transform to use. This may be either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``\"a\"`` part of the ``CTYPEia`` cards. *key* may only be provided if *header* is also provided. minerr : float, optional The minimum value a distortion correction must have in order to be applied. If the value of ``CQERRja`` is smaller than *minerr*, the corresponding distortion is not applied. relax : bool or int, optional Degree of permissiveness: - `True` (default): Admit all recognized informal extensions of the WCS standard. - `False`: Recognize only FITS keywords defined by the published WCS standard. - `int`: a bit field selecting specific extensions to accept. See :ref:`astropy:relaxread` for details. naxis : int or sequence, optional Extracts specific coordinate axes using :meth:`~astropy.wcs.Wcsprm.sub`. If a header is provided, and *naxis* is not ``None``, *naxis* will be passed to :meth:`~astropy.wcs.Wcsprm.sub` in order to select specific axes from the header. See :meth:`~astropy.wcs.Wcsprm.sub` for more details about this parameter. keysel : sequence of str, optional A sequence of flags used to select the keyword types considered by wcslib. When ``None``, only the standard image header keywords are considered (and the underlying wcspih() C function is called). To use binary table image array or pixel list keywords, *keysel* must be set. 
Each element in the list should be one of the following strings: - 'image': Image header keywords - 'binary': Binary table image array keywords - 'pixel': Pixel list keywords Keywords such as ``EQUIna`` or ``RFRQna`` that are common to binary table image arrays and pixel lists (including ``WCSNna`` and ``TWCSna``) are selected by both 'binary' and 'pixel'. colsel : sequence of int, optional A sequence of table column numbers used to restrict the WCS transformations considered to only those pertaining to the specified columns. If `None`, there is no restriction. fix : bool, optional When `True` (default), call `~astropy.wcs.Wcsprm.fix` on the resulting object to fix any non-standard uses in the header. `FITSFixedWarning` Warnings will be emitted if any changes were made. translate_units : str, optional Specify which potentially unsafe translations of non-standard unit strings to perform. By default, performs none. See `WCS.fix` for more information about this parameter. Only effective when ``fix`` is `True`. Raises ------ MemoryError Memory allocation failed. ValueError Invalid key. KeyError Key not found in FITS header. ValueError Lookup table distortion present in the header but *fobj* was not provided. Notes ----- 1. astropy.wcs supports arbitrary *n* dimensions for the core WCS (the transformations handled by WCSLIB). However, the `distortion paper`_ lookup table and `SIP`_ distortions must be two dimensional. Therefore, if you try to create a WCS object where the core WCS has a different number of dimensions than 2 and that object also contains a `distortion paper`_ lookup table or `SIP`_ distortion, a `ValueError` exception will be raised. To avoid this, consider using the *naxis* kwarg to select two dimensions from the core WCS. 2. The number of coordinate axes in the transformation is not determined directly from the ``NAXIS`` keyword but instead from the highest of: - ``NAXIS`` keyword - ``WCSAXESa`` keyword - The highest axis number in any parameterized WCS keyword. The keyvalue, as well as the keyword, must be syntactically valid otherwise it will not be considered. If none of these keyword types is present, i.e. if the header only contains auxiliary WCS keywords for a particular coordinate representation, then no coordinate description is constructed for it. The number of axes, which is set as the ``naxis`` member, may differ for different coordinate representations of the same image. 3. When the header includes duplicate keywords, in most cases the last encountered is used. 4. `~astropy.wcs.Wcsprm.set` is called immediately after construction, so any invalid keywords or transformations will be raised by the constructor, not when subsequently calling a transformation method. """ # noqa: E501 def __init__(self, header=None, fobj=None, key=' ', minerr=0.0, relax=True, naxis=None, keysel=None, colsel=None, fix=True, translate_units='', _do_set=True): close_fds = [] # these parameters are stored to be used when unpickling a WCS object: self._init_kwargs = { 'keysel': copy.copy(keysel), 'colsel': copy.copy(colsel), } if header is None: if naxis is None: naxis = 2 wcsprm = _wcs.Wcsprm(header=None, key=key, relax=relax, naxis=naxis) self.naxis = wcsprm.naxis # Set some reasonable defaults. 
det2im = (None, None) cpdis = (None, None) sip = None else: keysel_flags = _parse_keysel(keysel) if isinstance(header, (str, bytes)): try: is_path = (possible_filename(header) and os.path.exists(header)) except (OSError, ValueError): is_path = False if is_path: if fobj is not None: raise ValueError( "Can not provide both a FITS filename to " "argument 1 and a FITS file object to argument 2") fobj = fits.open(header) close_fds.append(fobj) header = fobj[0].header elif isinstance(header, fits.hdu.image._ImageBaseHDU): header = header.header elif not isinstance(header, fits.Header): try: # Accept any dict-like object orig_header = header header = fits.Header() for dict_key in orig_header.keys(): header[dict_key] = orig_header[dict_key] except TypeError: raise TypeError( "header must be a string, an astropy.io.fits.Header " "object, or a dict-like object") if isinstance(header, fits.Header): header_string = header.tostring().rstrip() else: header_string = header # Importantly, header is a *copy* of the passed-in header # because we will be modifying it if isinstance(header_string, str): header_bytes = header_string.encode('ascii') header_string = header_string else: header_bytes = header_string header_string = header_string.decode('ascii') if not (fobj is None or isinstance(fobj, fits.HDUList)): raise AssertionError("'fobj' must be either None or an " "astropy.io.fits.HDUList object.") est_naxis = 2 try: tmp_header = fits.Header.fromstring(header_string) self._remove_sip_kw(tmp_header) tmp_header_bytes = tmp_header.tostring().rstrip() if isinstance(tmp_header_bytes, str): tmp_header_bytes = tmp_header_bytes.encode('ascii') tmp_wcsprm = _wcs.Wcsprm(header=tmp_header_bytes, key=key, relax=relax, keysel=keysel_flags, colsel=colsel, warnings=False, hdulist=fobj) if naxis is not None: try: tmp_wcsprm = tmp_wcsprm.sub(naxis) except ValueError: pass est_naxis = tmp_wcsprm.naxis if tmp_wcsprm.naxis else 2 except _wcs.NoWcsKeywordsFoundError: pass self.naxis = est_naxis header = fits.Header.fromstring(header_string) det2im = self._read_det2im_kw(header, fobj, err=minerr) cpdis = self._read_distortion_kw( header, fobj, dist='CPDIS', err=minerr) sip = self._read_sip_kw(header, wcskey=key) self._remove_sip_kw(header) header_string = header.tostring() header_string = header_string.replace('END' + ' ' * 77, '') if isinstance(header_string, str): header_bytes = header_string.encode('ascii') header_string = header_string else: header_bytes = header_string header_string = header_string.decode('ascii') try: wcsprm = _wcs.Wcsprm(header=header_bytes, key=key, relax=relax, keysel=keysel_flags, colsel=colsel, hdulist=fobj) except _wcs.NoWcsKeywordsFoundError: # The header may have SIP or distortions, but no core # WCS. That isn't an error -- we want a "default" # (identity) core Wcs transformation in that case. if colsel is None: wcsprm = _wcs.Wcsprm(header=None, key=key, relax=relax, keysel=keysel_flags, colsel=colsel, hdulist=fobj) else: raise if naxis is not None: wcsprm = wcsprm.sub(naxis) self.naxis = wcsprm.naxis if (wcsprm.naxis != 2 and (det2im[0] or det2im[1] or cpdis[0] or cpdis[1] or sip)): raise ValueError( """ FITS WCS distortion paper lookup tables and SIP distortions only work in 2 dimensions. However, WCSLIB has detected {} dimensions in the core WCS keywords. To use core WCS in conjunction with FITS WCS distortion paper lookup tables or SIP distortion, you must select or reduce these to 2 dimensions using the naxis kwarg. 
""".format(wcsprm.naxis)) header_naxis = header.get('NAXIS', None) if header_naxis is not None and header_naxis < wcsprm.naxis: warnings.warn( "The WCS transformation has more axes ({:d}) than the " "image it is associated with ({:d})".format( wcsprm.naxis, header_naxis), FITSFixedWarning) self._get_naxis(header) WCSBase.__init__(self, sip, cpdis, wcsprm, det2im) if fix: if header is None: with warnings.catch_warnings(): warnings.simplefilter('ignore', FITSFixedWarning) self.fix(translate_units=translate_units) else: self.fix(translate_units=translate_units) if _do_set: self.wcs.set() for fd in close_fds: fd.close() self._pixel_bounds = None def __copy__(self): new_copy = self.__class__() WCSBase.__init__(new_copy, self.sip, (self.cpdis1, self.cpdis2), self.wcs, (self.det2im1, self.det2im2)) new_copy.__dict__.update(self.__dict__) return new_copy def __deepcopy__(self, memo): from copy import deepcopy new_copy = self.__class__() new_copy.naxis = deepcopy(self.naxis, memo) WCSBase.__init__(new_copy, deepcopy(self.sip, memo), (deepcopy(self.cpdis1, memo), deepcopy(self.cpdis2, memo)), deepcopy(self.wcs, memo), (deepcopy(self.det2im1, memo), deepcopy(self.det2im2, memo))) for key, val in self.__dict__.items(): new_copy.__dict__[key] = deepcopy(val, memo) return new_copy def copy(self): """ Return a shallow copy of the object. Convenience method so user doesn't have to import the :mod:`copy` stdlib module. .. warning:: Use `deepcopy` instead of `copy` unless you know why you need a shallow copy. """ return copy.copy(self) def deepcopy(self): """ Return a deep copy of the object. Convenience method so user doesn't have to import the :mod:`copy` stdlib module. """ return copy.deepcopy(self) def sub(self, axes=None): copy = self.deepcopy() # We need to know which axes have been dropped, but there is no easy # way to do this with the .sub function, so instead we assign UUIDs to # the CNAME parameters in copy.wcs. We can later access the original # CNAME properties from self.wcs. cname_uuid = [str(uuid.uuid4()) for i in range(copy.wcs.naxis)] copy.wcs.cname = cname_uuid # Subset the WCS copy.wcs = copy.wcs.sub(axes) copy.naxis = copy.wcs.naxis # Construct a list of dimensions from the original WCS in the order # in which they appear in the final WCS. keep = [cname_uuid.index(cname) if cname in cname_uuid else None for cname in copy.wcs.cname] # Restore the original CNAMEs copy.wcs.cname = ['' if i is None else self.wcs.cname[i] for i in keep] # Subset pixel_shape and pixel_bounds if self.pixel_shape: copy.pixel_shape = tuple([None if i is None else self.pixel_shape[i] for i in keep]) if self.pixel_bounds: copy.pixel_bounds = [None if i is None else self.pixel_bounds[i] for i in keep] return copy if _wcs is not None: sub.__doc__ = _wcs.Wcsprm.sub.__doc__ def _fix_scamp(self): """ Remove SCAMP's PVi_m distortion parameters if SIP distortion parameters are also present. Some projects (e.g., Palomar Transient Factory) convert SCAMP's distortion parameters (which abuse the PVi_m cards) to SIP. However, wcslib gets confused by the presence of both SCAMP and SIP distortion parameters. See https://github.com/astropy/astropy/issues/299. """ # Nothing to be done if no WCS attached if self.wcs is None: return # Nothing to be done if no PV parameters attached pv = self.wcs.get_pv() if not pv: return # Nothing to be done if axes don't use SIP distortion parameters if self.sip is None: return # Nothing to be done if any radial terms are present... # Loop over list to find any radial terms. 
# Certain values of the `j' index are used for storing # radial terms; refer to Equation (1) in # <http://web.ipac.caltech.edu/staff/shupe/reprints/SIP_to_PV_SPIE2012.pdf>. pv = np.asarray(pv) # Loop over distinct values of `i' index for i in set(pv[:, 0]): # Get all values of `j' index for this value of `i' index js = set(pv[:, 1][pv[:, 0] == i]) # Find max value of `j' index max_j = max(js) for j in (3, 11, 23, 39): if j < max_j and j in js: return self.wcs.set_pv([]) warnings.warn("Removed redundant SCAMP distortion parameters " + "because SIP parameters are also present", FITSFixedWarning) def fix(self, translate_units='', naxis=None): """ Perform the fix operations from wcslib, and warn about any changes it has made. Parameters ---------- translate_units : str, optional Specify which potentially unsafe translations of non-standard unit strings to perform. By default, performs none. Although ``"S"`` is commonly used to represent seconds, its translation to ``"s"`` is potentially unsafe since the standard recognizes ``"S"`` formally as Siemens, however rarely that may be used. The same applies to ``"H"`` for hours (Henry), and ``"D"`` for days (Debye). This string controls what to do in such cases, and is case-insensitive. - If the string contains ``"s"``, translate ``"S"`` to ``"s"``. - If the string contains ``"h"``, translate ``"H"`` to ``"h"``. - If the string contains ``"d"``, translate ``"D"`` to ``"d"``. Thus ``''`` doesn't do any unsafe translations, whereas ``'shd'`` does all of them. naxis : int array, optional Image axis lengths. If this array is set to zero or ``None``, then `~astropy.wcs.Wcsprm.cylfix` will not be invoked. """ if self.wcs is not None: self._fix_scamp() fixes = self.wcs.fix(translate_units, naxis) for key, val in fixes.items(): if val != "No change": if (key == 'datfix' and '1858-11-17' in val and not np.count_nonzero(self.wcs.mjdref)): continue warnings.warn( ("'{0}' made the change '{1}'."). format(key, val), FITSFixedWarning) def calc_footprint(self, header=None, undistort=True, axes=None, center=True): """ Calculates the footprint of the image on the sky. A footprint is defined as the positions of the corners of the image on the sky after all available distortions have been applied. Parameters ---------- header : `~astropy.io.fits.Header` object, optional Used to get ``NAXIS1`` and ``NAXIS2`` header and axes are mutually exclusive, alternative ways to provide the same information. undistort : bool, optional If `True`, take SIP and distortion lookup table into account axes : (int, int), optional If provided, use the given sequence as the shape of the image. Otherwise, use the ``NAXIS1`` and ``NAXIS2`` keywords from the header that was used to create this `WCS` object. center : bool, optional If `True` use the center of the pixel, otherwise use the corner. Returns ------- coord : (4, 2) array of (*x*, *y*) coordinates. The order is clockwise starting with the bottom left corner. 
""" if axes is not None: naxis1, naxis2 = axes else: if header is None: try: # classes that inherit from WCS and define naxis1/2 # do not require a header parameter naxis1, naxis2 = self.pixel_shape except (AttributeError, TypeError): warnings.warn( "Need a valid header in order to calculate footprint\n", AstropyUserWarning) return None else: naxis1 = header.get('NAXIS1', None) naxis2 = header.get('NAXIS2', None) if naxis1 is None or naxis2 is None: raise ValueError( "Image size could not be determined.") if center: corners = np.array([[1, 1], [1, naxis2], [naxis1, naxis2], [naxis1, 1]], dtype=np.float64) else: corners = np.array([[0.5, 0.5], [0.5, naxis2 + 0.5], [naxis1 + 0.5, naxis2 + 0.5], [naxis1 + 0.5, 0.5]], dtype=np.float64) if undistort: return self.all_pix2world(corners, 1) else: return self.wcs_pix2world(corners, 1) def _read_det2im_kw(self, header, fobj, err=0.0): """ Create a `distortion paper`_ type lookup table for detector to image plane correction. """ if fobj is None: return (None, None) if not isinstance(fobj, fits.HDUList): return (None, None) try: axiscorr = header['AXISCORR'] d2imdis = self._read_d2im_old_format(header, fobj, axiscorr) return d2imdis except KeyError: pass dist = 'D2IMDIS' d_kw = 'D2IM' err_kw = 'D2IMERR' tables = {} for i in range(1, self.naxis + 1): d_error = header.get(err_kw + str(i), 0.0) if d_error < err: tables[i] = None continue distortion = dist + str(i) if distortion in header: dis = header[distortion].lower() if dis == 'lookup': del header[distortion] assert isinstance(fobj, fits.HDUList), ( 'An astropy.io.fits.HDUList' 'is required for Lookup table distortion.') dp = (d_kw + str(i)).strip() dp_extver_key = dp + '.EXTVER' if dp_extver_key in header: d_extver = header[dp_extver_key] del header[dp_extver_key] else: d_extver = 1 dp_axis_key = dp + f'.AXIS.{i:d}' if i == header[dp_axis_key]: d_data = fobj['D2IMARR', d_extver].data else: d_data = (fobj['D2IMARR', d_extver].data).transpose() del header[dp_axis_key] d_header = fobj['D2IMARR', d_extver].header d_crpix = (d_header.get('CRPIX1', 0.0), d_header.get('CRPIX2', 0.0)) d_crval = (d_header.get('CRVAL1', 0.0), d_header.get('CRVAL2', 0.0)) d_cdelt = (d_header.get('CDELT1', 1.0), d_header.get('CDELT2', 1.0)) d_lookup = DistortionLookupTable(d_data, d_crpix, d_crval, d_cdelt) tables[i] = d_lookup else: warnings.warn('Polynomial distortion is not implemented.\n', AstropyUserWarning) for key in set(header): if key.startswith(dp + '.'): del header[key] else: tables[i] = None if not tables: return (None, None) else: return (tables.get(1), tables.get(2)) def _read_d2im_old_format(self, header, fobj, axiscorr): warnings.warn( "The use of ``AXISCORR`` for D2IM correction has been deprecated." "`~astropy.wcs` will read in files with ``AXISCORR`` but ``to_fits()`` will write " "out files without it.", AstropyDeprecationWarning) cpdis = [None, None] crpix = [0., 0.] crval = [0., 0.] cdelt = [1., 1.] 
try: d2im_data = fobj[('D2IMARR', 1)].data except KeyError: return (None, None) except AttributeError: return (None, None) d2im_data = np.array([d2im_data]) d2im_hdr = fobj[('D2IMARR', 1)].header naxis = d2im_hdr['NAXIS'] for i in range(1, naxis + 1): crpix[i - 1] = d2im_hdr.get('CRPIX' + str(i), 0.0) crval[i - 1] = d2im_hdr.get('CRVAL' + str(i), 0.0) cdelt[i - 1] = d2im_hdr.get('CDELT' + str(i), 1.0) cpdis = DistortionLookupTable(d2im_data, crpix, crval, cdelt) if axiscorr == 1: return (cpdis, None) elif axiscorr == 2: return (None, cpdis) else: warnings.warn("Expected AXISCORR to be 1 or 2", AstropyUserWarning) return (None, None) def _write_det2im(self, hdulist): """ Writes a `distortion paper`_ type lookup table to the given `~astropy.io.fits.HDUList`. """ if self.det2im1 is None and self.det2im2 is None: return dist = 'D2IMDIS' d_kw = 'D2IM' def write_d2i(num, det2im): if det2im is None: return hdulist[0].header[f'{dist}{num:d}'] = ( 'LOOKUP', 'Detector to image correction type') hdulist[0].header[f'{d_kw}{num:d}.EXTVER'] = ( num, 'Version number of WCSDVARR extension') hdulist[0].header[f'{d_kw}{num:d}.NAXES'] = ( len(det2im.data.shape), 'Number of independent variables in D2IM function') for i in range(det2im.data.ndim): jth = {1: '1st', 2: '2nd', 3: '3rd'}.get(i + 1, f'{i + 1}th') hdulist[0].header[f'{d_kw}{num:d}.AXIS.{i + 1:d}'] = ( i + 1, f'Axis number of the {jth} variable in a D2IM function') image = fits.ImageHDU(det2im.data, name='D2IMARR') header = image.header header['CRPIX1'] = (det2im.crpix[0], 'Coordinate system reference pixel') header['CRPIX2'] = (det2im.crpix[1], 'Coordinate system reference pixel') header['CRVAL1'] = (det2im.crval[0], 'Coordinate system value at reference pixel') header['CRVAL2'] = (det2im.crval[1], 'Coordinate system value at reference pixel') header['CDELT1'] = (det2im.cdelt[0], 'Coordinate increment along axis') header['CDELT2'] = (det2im.cdelt[1], 'Coordinate increment along axis') image.ver = int(hdulist[0].header[f'{d_kw}{num:d}.EXTVER']) hdulist.append(image) write_d2i(1, self.det2im1) write_d2i(2, self.det2im2) def _read_distortion_kw(self, header, fobj, dist='CPDIS', err=0.0): """ Reads `distortion paper`_ table-lookup keywords and data, and returns a 2-tuple of `~astropy.wcs.DistortionLookupTable` objects. If no `distortion paper`_ keywords are found, ``(None, None)`` is returned. 
""" if isinstance(header, (str, bytes)): return (None, None) if dist == 'CPDIS': d_kw = 'DP' err_kw = 'CPERR' else: d_kw = 'DQ' err_kw = 'CQERR' tables = {} for i in range(1, self.naxis + 1): d_error_key = err_kw + str(i) if d_error_key in header: d_error = header[d_error_key] del header[d_error_key] else: d_error = 0.0 if d_error < err: tables[i] = None continue distortion = dist + str(i) if distortion in header: dis = header[distortion].lower() del header[distortion] if dis == 'lookup': if not isinstance(fobj, fits.HDUList): raise ValueError('an astropy.io.fits.HDUList is ' 'required for Lookup table distortion.') dp = (d_kw + str(i)).strip() dp_extver_key = dp + '.EXTVER' if dp_extver_key in header: d_extver = header[dp_extver_key] del header[dp_extver_key] else: d_extver = 1 dp_axis_key = dp + f'.AXIS.{i:d}' if i == header[dp_axis_key]: d_data = fobj['WCSDVARR', d_extver].data else: d_data = (fobj['WCSDVARR', d_extver].data).transpose() del header[dp_axis_key] d_header = fobj['WCSDVARR', d_extver].header d_crpix = (d_header.get('CRPIX1', 0.0), d_header.get('CRPIX2', 0.0)) d_crval = (d_header.get('CRVAL1', 0.0), d_header.get('CRVAL2', 0.0)) d_cdelt = (d_header.get('CDELT1', 1.0), d_header.get('CDELT2', 1.0)) d_lookup = DistortionLookupTable(d_data, d_crpix, d_crval, d_cdelt) tables[i] = d_lookup for key in set(header): if key.startswith(dp + '.'): del header[key] else: warnings.warn('Polynomial distortion is not implemented.\n', AstropyUserWarning) else: tables[i] = None if not tables: return (None, None) else: return (tables.get(1), tables.get(2)) def _write_distortion_kw(self, hdulist, dist='CPDIS'): """ Write out `distortion paper`_ keywords to the given `~astropy.io.fits.HDUList`. """ if self.cpdis1 is None and self.cpdis2 is None: return if dist == 'CPDIS': d_kw = 'DP' else: d_kw = 'DQ' def write_dist(num, cpdis): if cpdis is None: return hdulist[0].header[f'{dist}{num:d}'] = ( 'LOOKUP', 'Prior distortion function type') hdulist[0].header[f'{d_kw}{num:d}.EXTVER'] = ( num, 'Version number of WCSDVARR extension') hdulist[0].header[f'{d_kw}{num:d}.NAXES'] = ( len(cpdis.data.shape), f'Number of independent variables in {dist} function') for i in range(cpdis.data.ndim): jth = {1: '1st', 2: '2nd', 3: '3rd'}.get(i + 1, f'{i + 1}th') hdulist[0].header[f'{d_kw}{num:d}.AXIS.{i + 1:d}'] = ( i + 1, f'Axis number of the {jth} variable in a {dist} function') image = fits.ImageHDU(cpdis.data, name='WCSDVARR') header = image.header header['CRPIX1'] = (cpdis.crpix[0], 'Coordinate system reference pixel') header['CRPIX2'] = (cpdis.crpix[1], 'Coordinate system reference pixel') header['CRVAL1'] = (cpdis.crval[0], 'Coordinate system value at reference pixel') header['CRVAL2'] = (cpdis.crval[1], 'Coordinate system value at reference pixel') header['CDELT1'] = (cpdis.cdelt[0], 'Coordinate increment along axis') header['CDELT2'] = (cpdis.cdelt[1], 'Coordinate increment along axis') image.ver = int(hdulist[0].header[f'{d_kw}{num:d}.EXTVER']) hdulist.append(image) write_dist(1, self.cpdis1) write_dist(2, self.cpdis2) def _remove_sip_kw(self, header): """ Remove SIP information from a header. """ # Never pass SIP coefficients to wcslib # CTYPE must be passed with -SIP to wcslib for key in set(m.group() for m in map(SIP_KW.match, list(header)) if m is not None): del header[key] def _read_sip_kw(self, header, wcskey=""): """ Reads `SIP`_ header keywords and returns a `~astropy.wcs.Sip` object. If no `SIP`_ header keywords are found, ``None`` is returned. 
""" if isinstance(header, (str, bytes)): # TODO: Parse SIP from a string without pyfits around return None if "A_ORDER" in header and header['A_ORDER'] > 1: if "B_ORDER" not in header: raise ValueError( "A_ORDER provided without corresponding B_ORDER " "keyword for SIP distortion") m = int(header["A_ORDER"]) a = np.zeros((m + 1, m + 1), np.double) for i in range(m + 1): for j in range(m - i + 1): key = f"A_{i}_{j}" if key in header: a[i, j] = header[key] del header[key] m = int(header["B_ORDER"]) if m > 1: b = np.zeros((m + 1, m + 1), np.double) for i in range(m + 1): for j in range(m - i + 1): key = f"B_{i}_{j}" if key in header: b[i, j] = header[key] del header[key] else: a = None b = None del header['A_ORDER'] del header['B_ORDER'] ctype = [header[f'CTYPE{nax}{wcskey}'] for nax in range(1, self.naxis + 1)] if any(not ctyp.endswith('-SIP') for ctyp in ctype): message = """ Inconsistent SIP distortion information is present in the FITS header and the WCS object: SIP coefficients were detected, but CTYPE is missing a "-SIP" suffix. astropy.wcs is using the SIP distortion coefficients, therefore the coordinates calculated here might be incorrect. If you do not want to apply the SIP distortion coefficients, please remove the SIP coefficients from the FITS header or the WCS object. As an example, if the image is already distortion-corrected (e.g., drizzled) then distortion components should not apply and the SIP coefficients should be removed. While the SIP distortion coefficients are being applied here, if that was indeed the intent, for consistency please append "-SIP" to the CTYPE in the FITS header or the WCS object. """ # noqa: E501 log.info(message) elif "B_ORDER" in header and header['B_ORDER'] > 1: raise ValueError( "B_ORDER provided without corresponding A_ORDER " + "keyword for SIP distortion") else: a = None b = None if "AP_ORDER" in header and header['AP_ORDER'] > 1: if "BP_ORDER" not in header: raise ValueError( "AP_ORDER provided without corresponding BP_ORDER " "keyword for SIP distortion") m = int(header["AP_ORDER"]) ap = np.zeros((m + 1, m + 1), np.double) for i in range(m + 1): for j in range(m - i + 1): key = f"AP_{i}_{j}" if key in header: ap[i, j] = header[key] del header[key] m = int(header["BP_ORDER"]) if m > 1: bp = np.zeros((m + 1, m + 1), np.double) for i in range(m + 1): for j in range(m - i + 1): key = f"BP_{i}_{j}" if key in header: bp[i, j] = header[key] del header[key] else: ap = None bp = None del header['AP_ORDER'] del header['BP_ORDER'] elif "BP_ORDER" in header and header['BP_ORDER'] > 1: raise ValueError( "BP_ORDER provided without corresponding AP_ORDER " "keyword for SIP distortion") else: ap = None bp = None if a is None and b is None and ap is None and bp is None: return None if f"CRPIX1{wcskey}" not in header or f"CRPIX2{wcskey}" not in header: raise ValueError( "Header has SIP keywords without CRPIX keywords") crpix1 = header.get(f"CRPIX1{wcskey}") crpix2 = header.get(f"CRPIX2{wcskey}") return Sip(a, b, ap, bp, (crpix1, crpix2)) def _write_sip_kw(self): """ Write out SIP keywords. Returns a dictionary of key-value pairs. 
""" if self.sip is None: return {} keywords = {} def write_array(name, a): if a is None: return size = a.shape[0] trdir = 'sky to detector' if name[-1] == 'P' else 'detector to sky' comment = ('SIP polynomial order, axis {:d}, {:s}' .format(ord(name[0]) - ord('A'), trdir)) keywords[f'{name}_ORDER'] = size - 1, comment comment = 'SIP distortion coefficient' for i in range(size): for j in range(size - i): if a[i, j] != 0.0: keywords[ f'{name}_{i:d}_{j:d}'] = a[i, j], comment write_array('A', self.sip.a) write_array('B', self.sip.b) write_array('AP', self.sip.ap) write_array('BP', self.sip.bp) return keywords def _denormalize_sky(self, sky): if self.wcs.lngtyp != 'RA': raise ValueError( "WCS does not have longitude type of 'RA', therefore " + "(ra, dec) data can not be used as input") if self.wcs.lattyp != 'DEC': raise ValueError( "WCS does not have longitude type of 'DEC', therefore " + "(ra, dec) data can not be used as input") if self.wcs.naxis == 2: if self.wcs.lng == 0 and self.wcs.lat == 1: return sky elif self.wcs.lng == 1 and self.wcs.lat == 0: # Reverse the order of the columns return sky[:, ::-1] else: raise ValueError( "WCS does not have longitude and latitude celestial " + "axes, therefore (ra, dec) data can not be used as input") else: if self.wcs.lng < 0 or self.wcs.lat < 0: raise ValueError( "WCS does not have both longitude and latitude " "celestial axes, therefore (ra, dec) data can not be " + "used as input") out = np.zeros((sky.shape[0], self.wcs.naxis)) out[:, self.wcs.lng] = sky[:, 0] out[:, self.wcs.lat] = sky[:, 1] return out def _normalize_sky(self, sky): if self.wcs.lngtyp != 'RA': raise ValueError( "WCS does not have longitude type of 'RA', therefore " + "(ra, dec) data can not be returned") if self.wcs.lattyp != 'DEC': raise ValueError( "WCS does not have longitude type of 'DEC', therefore " + "(ra, dec) data can not be returned") if self.wcs.naxis == 2: if self.wcs.lng == 0 and self.wcs.lat == 1: return sky elif self.wcs.lng == 1 and self.wcs.lat == 0: # Reverse the order of the columns return sky[:, ::-1] else: raise ValueError( "WCS does not have longitude and latitude celestial " "axes, therefore (ra, dec) data can not be returned") else: if self.wcs.lng < 0 or self.wcs.lat < 0: raise ValueError( "WCS does not have both longitude and latitude celestial " "axes, therefore (ra, dec) data can not be returned") out = np.empty((sky.shape[0], 2)) out[:, 0] = sky[:, self.wcs.lng] out[:, 1] = sky[:, self.wcs.lat] return out def _array_converter(self, func, sky, *args, ra_dec_order=False): """ A helper function to support reading either a pair of arrays or a single Nx2 array. 
""" def _return_list_of_arrays(axes, origin): if any([x.size == 0 for x in axes]): return axes try: axes = np.broadcast_arrays(*axes) except ValueError: raise ValueError( "Coordinate arrays are not broadcastable to each other") xy = np.hstack([x.reshape((x.size, 1)) for x in axes]) if ra_dec_order and sky == 'input': xy = self._denormalize_sky(xy) output = func(xy, origin) if ra_dec_order and sky == 'output': output = self._normalize_sky(output) return (output[:, 0].reshape(axes[0].shape), output[:, 1].reshape(axes[0].shape)) return [output[:, i].reshape(axes[0].shape) for i in range(output.shape[1])] def _return_single_array(xy, origin): if xy.shape[-1] != self.naxis: raise ValueError( "When providing two arguments, the array must be " "of shape (N, {})".format(self.naxis)) if 0 in xy.shape: return xy if ra_dec_order and sky == 'input': xy = self._denormalize_sky(xy) result = func(xy, origin) if ra_dec_order and sky == 'output': result = self._normalize_sky(result) return result if len(args) == 2: try: xy, origin = args xy = np.asarray(xy) origin = int(origin) except Exception: raise TypeError( "When providing two arguments, they must be " "(coords[N][{}], origin)".format(self.naxis)) if xy.shape == () or len(xy.shape) == 1: return _return_list_of_arrays([xy], origin) return _return_single_array(xy, origin) elif len(args) == self.naxis + 1: axes = args[:-1] origin = args[-1] try: axes = [np.asarray(x) for x in axes] origin = int(origin) except Exception: raise TypeError( "When providing more than two arguments, they must be " + "a 1-D array for each axis, followed by an origin.") return _return_list_of_arrays(axes, origin) raise TypeError( "WCS projection has {0} dimensions, so expected 2 (an Nx{0} array " "and the origin argument) or {1} arguments (the position in each " "dimension, and the origin argument). Instead, {2} arguments were " "given.".format( self.naxis, self.naxis + 1, len(args))) def all_pix2world(self, *args, **kwargs): return self._array_converter( self._all_pix2world, 'output', *args, **kwargs) all_pix2world.__doc__ = """ Transforms pixel coordinates to world coordinates. Performs all of the following in series: - Detector to image plane correction (if present in the FITS file) - `SIP`_ distortion correction (if present in the FITS file) - `distortion paper`_ table-lookup correction (if present in the FITS file) - `wcslib`_ "core" WCS transformation Parameters ---------- {} For a transformation that is not two-dimensional, the two-argument form must be used. {} Returns ------- {} Notes ----- The order of the axes for the result is determined by the ``CTYPEia`` keywords in the FITS header, therefore it may not always be of the form (*ra*, *dec*). The `~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`, `~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp` members can be used to determine the order of the axes. Raises ------ MemoryError Memory allocation failed. SingularMatrixError Linear transformation matrix is singular. InconsistentAxisTypesError Inconsistent or unrecognized coordinate axis types. ValueError Invalid parameter value. ValueError Invalid coordinate transformation parameters. ValueError x- and y-coordinate arrays are not the same size. InvalidTransformError Invalid coordinate transformation parameters. InvalidTransformError Ill-conditioned coordinate transformation parameters. 
""".format(docstrings.TWO_OR_MORE_ARGS('naxis', 8), docstrings.RA_DEC_ORDER(8), docstrings.RETURNS('sky coordinates, in degrees', 8)) def wcs_pix2world(self, *args, **kwargs): if self.wcs is None: raise ValueError("No basic WCS settings were created.") return self._array_converter( lambda xy, o: self.wcs.p2s(xy, o)['world'], 'output', *args, **kwargs) wcs_pix2world.__doc__ = """ Transforms pixel coordinates to world coordinates by doing only the basic `wcslib`_ transformation. No `SIP`_ or `distortion paper`_ table lookup correction is applied. To perform distortion correction, see `~astropy.wcs.WCS.all_pix2world`, `~astropy.wcs.WCS.sip_pix2foc`, `~astropy.wcs.WCS.p4_pix2foc`, or `~astropy.wcs.WCS.pix2foc`. Parameters ---------- {} For a transformation that is not two-dimensional, the two-argument form must be used. {} Returns ------- {} Raises ------ MemoryError Memory allocation failed. SingularMatrixError Linear transformation matrix is singular. InconsistentAxisTypesError Inconsistent or unrecognized coordinate axis types. ValueError Invalid parameter value. ValueError Invalid coordinate transformation parameters. ValueError x- and y-coordinate arrays are not the same size. InvalidTransformError Invalid coordinate transformation parameters. InvalidTransformError Ill-conditioned coordinate transformation parameters. Notes ----- The order of the axes for the result is determined by the ``CTYPEia`` keywords in the FITS header, therefore it may not always be of the form (*ra*, *dec*). The `~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`, `~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp` members can be used to determine the order of the axes. """.format(docstrings.TWO_OR_MORE_ARGS('naxis', 8), docstrings.RA_DEC_ORDER(8), docstrings.RETURNS('world coordinates, in degrees', 8)) def _all_world2pix(self, world, origin, tolerance, maxiter, adaptive, detect_divergence, quiet): # ############################################################ # # DESCRIPTION OF THE NUMERICAL METHOD ## # ############################################################ # In this section I will outline the method of solving # the inverse problem of converting world coordinates to # pixel coordinates (*inverse* of the direct transformation # `all_pix2world`) and I will summarize some of the aspects # of the method proposed here and some of the issues of the # original `all_world2pix` (in relation to this method) # discussed in https://github.com/astropy/astropy/issues/1977 # A more detailed discussion can be found here: # https://github.com/astropy/astropy/pull/2373 # # # ### Background ### # # # I will refer here to the [SIP Paper] # (http://fits.gsfc.nasa.gov/registry/sip/SIP_distortion_v1_0.pdf). # According to this paper, the effect of distortions as # described in *their* equation (1) is: # # (1) x = CD*(u+f(u)), # # where `x` is a *vector* of "intermediate spherical # coordinates" (equivalent to (x,y) in the paper) and `u` # is a *vector* of "pixel coordinates", and `f` is a vector # function describing geometrical distortions # (see equations 2 and 3 in SIP Paper. # However, I prefer to use `w` for "intermediate world # coordinates", `x` for pixel coordinates, and assume that # transformation `W` performs the **linear** # (CD matrix + projection onto celestial sphere) part of the # conversion from pixel coordinates to world coordinates. 
# Then we can re-write (1) as: # # (2) w = W*(x+f(x)) = T(x) # # In `astropy.wcs.WCS` transformation `W` is represented by # the `wcs_pix2world` member, while the combined ("total") # transformation (linear part + distortions) is performed by # `all_pix2world`. Below I summarize the notations and their # equivalents in `astropy.wcs.WCS`: # # | Equation term | astropy.WCS/meaning | # | ------------- | ---------------------------- | # | `x` | pixel coordinates | # | `w` | world coordinates | # | `W` | `wcs_pix2world()` | # | `W^{-1}` | `wcs_world2pix()` | # | `T` | `all_pix2world()` | # | `x+f(x)` | `pix2foc()` | # # # ### Direct Solving of Equation (2) ### # # # In order to find the pixel coordinates that correspond to # given world coordinates `w`, it is necessary to invert # equation (2): `x=T^{-1}(w)`, or solve equation `w==T(x)` # for `x`. However, this approach has the following # disadvantages: # 1. It requires unnecessary transformations (see next # section). # 2. It is prone to "RA wrapping" issues as described in # https://github.com/astropy/astropy/issues/1977 # (essentially because `all_pix2world` may return points with # a different phase than user's input `w`). # # # ### Description of the Method Used here ### # # # By applying inverse linear WCS transformation (`W^{-1}`) # to both sides of equation (2) and introducing notation `x'` # (prime) for the pixels coordinates obtained from the world # coordinates by applying inverse *linear* WCS transformation # ("focal plane coordinates"): # # (3) x' = W^{-1}(w) # # we obtain the following equation: # # (4) x' = x+f(x), # # or, # # (5) x = x'-f(x) # # This equation is well suited for solving using the method # of fixed-point iterations # (http://en.wikipedia.org/wiki/Fixed-point_iteration): # # (6) x_{i+1} = x'-f(x_i) # # As an initial value of the pixel coordinate `x_0` we take # "focal plane coordinate" `x'=W^{-1}(w)=wcs_world2pix(w)`. # We stop iterations when `|x_{i+1}-x_i|<tolerance`. We also # consider the process to be diverging if # `|x_{i+1}-x_i|>|x_i-x_{i-1}|` # **when** `|x_{i+1}-x_i|>=tolerance` (when current # approximation is close to the true solution, # `|x_{i+1}-x_i|>|x_i-x_{i-1}|` may be due to rounding errors # and we ignore such "divergences" when # `|x_{i+1}-x_i|<tolerance`). It may appear that checking for # `|x_{i+1}-x_i|<tolerance` in order to ignore divergence is # unnecessary since the iterative process should stop anyway, # however, the proposed implementation of this iterative # process is completely vectorized and, therefore, we may # continue iterating over *some* points even though they have # converged to within a specified tolerance (while iterating # over other points that have not yet converged to # a solution). # # In order to efficiently implement iterative process (6) # using available methods in `astropy.wcs.WCS`, we add and # subtract `x_i` from the right side of equation (6): # # (7) x_{i+1} = x'-(x_i+f(x_i))+x_i = x'-pix2foc(x_i)+x_i, # # where `x'=wcs_world2pix(w)` and it is computed only *once* # before the beginning of the iterative process (and we also # set `x_0=x'`). By using `pix2foc` at each iteration instead # of `all_pix2world` we get about 25% increase in performance # (by not performing the linear `W` transformation at each # step) and we also avoid the "RA wrapping" issue described # above (by working in focal plane coordinates and avoiding # pix->world transformations). 
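        #
        # As a concrete toy illustration of iteration (6) (outside of
        # any WCS machinery; the quadratic distortion below is made up
        # purely for illustration), take a hypothetical 1-D distortion
        # f(x) = 0.01*x**2 and focal-plane coordinate x' = 2.0. The
        # update x_{i+1} = x' - f(x_i) then contracts quickly:
        #
        #     xp = 2.0
        #     x = xp                     # x_0 = x'
        #     for _ in range(5):
        #         x = xp - 0.01 * x**2   # equation (6)
        #     # x -> 1.96152... and |x_{i+1} - x_i| shrinks each step,
        #     # since |df/dx| = 0.02*|x| << 1 makes the map a contraction.
        #
        # The vectorized code below implements the same update on whole
        # arrays, using pix2foc(x) = x + f(x) as in equation (7).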
# # As an added benefit, the process converges to the correct # solution in just one iteration when distortions are not # present (compare to # https://github.com/astropy/astropy/issues/1977 and # https://github.com/astropy/astropy/pull/2294): in this case # `pix2foc` is the identical transformation # `x_i=pix2foc(x_i)` and from equation (7) we get: # # x' = x_0 = wcs_world2pix(w) # x_1 = x' - pix2foc(x_0) + x_0 = x' - pix2foc(x') + x' = x' # = wcs_world2pix(w) = x_0 # => # |x_1-x_0| = 0 < tolerance (with tolerance > 0) # # However, for performance reasons, it is still better to # avoid iterations altogether and return the exact linear # solution (`wcs_world2pix`) right-away when non-linear # distortions are not present by checking that attributes # `sip`, `cpdis1`, `cpdis2`, `det2im1`, and `det2im2` are # *all* `None`. # # # ### Outline of the Algorithm ### # # # While the proposed code is relatively long (considering # the simplicity of the algorithm), this is due to: 1) # checking if iterative solution is necessary at all; 2) # checking for divergence; 3) re-implementation of the # completely vectorized algorithm as an "adaptive" vectorized # algorithm (for cases when some points diverge for which we # want to stop iterations). In my tests, the adaptive version # of the algorithm is about 50% slower than non-adaptive # version for all HST images. # # The essential part of the vectorized non-adaptive algorithm # (without divergence and other checks) can be described # as follows: # # pix0 = self.wcs_world2pix(world, origin) # pix = pix0.copy() # 0-order solution # # for k in range(maxiter): # # find correction to the previous solution: # dpix = self.pix2foc(pix, origin) - pix0 # # # compute norm (L2) of the correction: # dn = np.linalg.norm(dpix, axis=1) # # # apply correction: # pix -= dpix # # # check convergence: # if np.max(dn) < tolerance: # break # # return pix # # Here, the input parameter `world` can be a `MxN` array # where `M` is the number of coordinate axes in WCS and `N` # is the number of points to be converted simultaneously to # image coordinates. # # # ### IMPORTANT NOTE: ### # # If, in the future releases of the `~astropy.wcs`, # `pix2foc` will not apply all the required distortion # corrections then in the code below, calls to `pix2foc` will # have to be replaced with # wcs_world2pix(all_pix2world(pix_list, origin), origin) # # ############################################################ # # INITIALIZE ITERATIVE PROCESS: ## # ############################################################ # initial approximation (linear WCS based only) pix0 = self.wcs_world2pix(world, origin) # Check that an iterative solution is required at all # (when any of the non-CD-matrix-based corrections are # present). If not required return the initial # approximation (pix0). 
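        # (``has_distortion`` below is true exactly when any of ``sip``,
        # ``cpdis1``, ``cpdis2``, ``det2im1``, or ``det2im2`` is not
        # `None` -- the same attribute list given in the IMPORTANT NOTE
        # above.)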
if not self.has_distortion: # No non-WCS corrections detected so # simply return initial approximation: return pix0 pix = pix0.copy() # 0-order solution # initial correction: dpix = self.pix2foc(pix, origin) - pix0 # Update initial solution: pix -= dpix # Norm (L2) squared of the correction: dn = np.sum(dpix*dpix, axis=1) dnprev = dn.copy() # if adaptive else dn tol2 = tolerance**2 # Prepare for iterative process k = 1 ind = None inddiv = None # Turn off numpy runtime warnings for 'invalid' and 'over': old_invalid = np.geterr()['invalid'] old_over = np.geterr()['over'] np.seterr(invalid='ignore', over='ignore') # ############################################################ # # NON-ADAPTIVE ITERATIONS: ## # ############################################################ if not adaptive: # Fixed-point iterations: while (np.nanmax(dn) >= tol2 and k < maxiter): # Find correction to the previous solution: dpix = self.pix2foc(pix, origin) - pix0 # Compute norm (L2) squared of the correction: dn = np.sum(dpix*dpix, axis=1) # Check for divergence (we do this in two stages # to optimize performance for the most common # scenario when successive approximations converge): if detect_divergence: divergent = (dn >= dnprev) if np.any(divergent): # Find solutions that have not yet converged: slowconv = (dn >= tol2) inddiv, = np.where(divergent & slowconv) if inddiv.shape[0] > 0: # Update indices of elements that # still need correction: conv = (dn < dnprev) iconv = np.where(conv) # Apply correction: dpixgood = dpix[iconv] pix[iconv] -= dpixgood dpix[iconv] = dpixgood # For the next iteration choose # non-divergent points that have not yet # converged to the requested accuracy: ind, = np.where(slowconv & conv) pix0 = pix0[ind] dnprev[ind] = dn[ind] k += 1 # Switch to adaptive iterations: adaptive = True break # Save current correction magnitudes for later: dnprev = dn # Apply correction: pix -= dpix k += 1 # ############################################################ # # ADAPTIVE ITERATIONS: ## # ############################################################ if adaptive: if ind is None: ind, = np.where(np.isfinite(pix).all(axis=1)) pix0 = pix0[ind] # "Adaptive" fixed-point iterations: while (ind.shape[0] > 0 and k < maxiter): # Find correction to the previous solution: dpixnew = self.pix2foc(pix[ind], origin) - pix0 # Compute norm (L2) of the correction: dnnew = np.sum(np.square(dpixnew), axis=1) # Bookkeeping of corrections: dnprev[ind] = dn[ind].copy() dn[ind] = dnnew if detect_divergence: # Find indices of pixels that are converging: conv = (dnnew < dnprev[ind]) iconv = np.where(conv) iiconv = ind[iconv] # Apply correction: dpixgood = dpixnew[iconv] pix[iiconv] -= dpixgood dpix[iiconv] = dpixgood # Find indices of solutions that have not yet # converged to the requested accuracy # AND that do not diverge: subind, = np.where((dnnew >= tol2) & conv) else: # Apply correction: pix[ind] -= dpixnew dpix[ind] = dpixnew # Find indices of solutions that have not yet # converged to the requested accuracy: subind, = np.where(dnnew >= tol2) # Choose solutions that need more iterations: ind = ind[subind] pix0 = pix0[subind] k += 1 # ############################################################ # # FINAL DETECTION OF INVALID, DIVERGING, ## # # AND FAILED-TO-CONVERGE POINTS ## # ############################################################ # Identify diverging and/or invalid points: invalid = ((~np.all(np.isfinite(pix), axis=1)) & (np.all(np.isfinite(world), axis=1))) # When detect_divergence==False, dnprev is outdated # 
(it is the norm of the very first correction). # Still better than nothing... inddiv, = np.where(((dn >= tol2) & (dn >= dnprev)) | invalid) if inddiv.shape[0] == 0: inddiv = None # Identify points that did not converge within 'maxiter' # iterations: if k >= maxiter: ind, = np.where((dn >= tol2) & (dn < dnprev) & (~invalid)) if ind.shape[0] == 0: ind = None else: ind = None # Restore previous numpy error settings: np.seterr(invalid=old_invalid, over=old_over) # ############################################################ # # RAISE EXCEPTION IF DIVERGING OR TOO SLOWLY CONVERGING ## # # DATA POINTS HAVE BEEN DETECTED: ## # ############################################################ if (ind is not None or inddiv is not None) and not quiet: if inddiv is None: raise NoConvergence( "'WCS.all_world2pix' failed to " "converge to the requested accuracy after {:d} " "iterations.".format(k), best_solution=pix, accuracy=np.abs(dpix), niter=k, slow_conv=ind, divergent=None) else: raise NoConvergence( "'WCS.all_world2pix' failed to " "converge to the requested accuracy.\n" "After {:d} iterations, the solution is diverging " "at least for one input point." .format(k), best_solution=pix, accuracy=np.abs(dpix), niter=k, slow_conv=ind, divergent=inddiv) return pix @deprecated_renamed_argument('accuracy', 'tolerance', '4.3') def all_world2pix(self, *args, tolerance=1e-4, maxiter=20, adaptive=False, detect_divergence=True, quiet=False, **kwargs): if self.wcs is None: raise ValueError("No basic WCS settings were created.") return self._array_converter( lambda *args, **kwargs: self._all_world2pix( *args, tolerance=tolerance, maxiter=maxiter, adaptive=adaptive, detect_divergence=detect_divergence, quiet=quiet), 'input', *args, **kwargs ) all_world2pix.__doc__ = """ all_world2pix(*arg, tolerance=1.0e-4, maxiter=20, adaptive=False, detect_divergence=True, quiet=False) Transforms world coordinates to pixel coordinates, using numerical iteration to invert the full forward transformation `~astropy.wcs.WCS.all_pix2world` with complete distortion model. Parameters ---------- {0} For a transformation that is not two-dimensional, the two-argument form must be used. {1} tolerance : float, optional (default = 1.0e-4) Tolerance of solution. Iteration terminates when the iterative solver estimates that the "true solution" is within this many pixels current estimate, more specifically, when the correction to the solution found during the previous iteration is smaller (in the sense of the L2 norm) than ``tolerance``. maxiter : int, optional (default = 20) Maximum number of iterations allowed to reach a solution. quiet : bool, optional (default = False) Do not throw :py:class:`NoConvergence` exceptions when the method does not converge to a solution with the required accuracy within a specified number of maximum iterations set by ``maxiter`` parameter. Instead, simply return the found solution. Other Parameters ---------------- adaptive : bool, optional (default = False) Specifies whether to adaptively select only points that did not converge to a solution within the required accuracy for the next iteration. Default is recommended for HST as well as most other instruments. .. note:: The :py:meth:`all_world2pix` uses a vectorized implementation of the method of consecutive approximations (see ``Notes`` section below) in which it iterates over *all* input points *regardless* until the required accuracy has been reached for *all* input points. 
In some cases it may be possible that *almost all* points have reached the required accuracy but there are only a few input data points for which additional iterations may be needed (this depends mostly on the characteristics of the geometric distortions for a given instrument). In this situation it may be advantageous to set ``adaptive`` = `True` in which case :py:meth:`all_world2pix` will continue iterating *only* over the points that have not yet converged to the required accuracy. However, for the HST's ACS/WFC detector, which has the strongest distortions of all HST instruments, testing has shown that enabling this option would lead to about a 50-100% penalty in computational time (depending on specifics of the image, geometric distortions, and number of input points to be converted). Therefore, for HST and possibly other instruments, it is recommended to set ``adaptive`` = `False`. The only danger in getting this setting wrong will be a performance penalty. .. note:: When ``detect_divergence`` is `True`, :py:meth:`all_world2pix` will automatically switch to the adaptive algorithm once divergence has been detected. detect_divergence : bool, optional (default = True) Specifies whether to perform a more detailed analysis of the convergence to a solution. Normally :py:meth:`all_world2pix` may not achieve the required accuracy if either the ``tolerance`` or ``maxiter`` arguments are too low. However, it may happen that for some geometric distortions the conditions of convergence for the method of consecutive approximations used by :py:meth:`all_world2pix` may not be satisfied, in which case consecutive approximations to the solution will diverge regardless of the ``tolerance`` or ``maxiter`` settings. When ``detect_divergence`` is `False`, these divergent points will be detected as not having achieved the required accuracy (without further details). In addition, if ``adaptive`` is `False` then the algorithm will not know that the solution (for specific points) is diverging and will continue iterating and trying to "improve" diverging solutions. This may result in ``NaN`` or ``Inf`` values in the return results (in addition to performance penalties). Even when ``detect_divergence`` is `False`, :py:meth:`all_world2pix`, at the end of the iterative process, will identify invalid results (``NaN`` or ``Inf``) as "diverging" solutions and will raise :py:class:`NoConvergence` unless the ``quiet`` parameter is set to `True`. When ``detect_divergence`` is `True`, :py:meth:`all_world2pix` will detect points for which the current correction to the coordinates is larger than the correction applied during the previous iteration **if** the requested accuracy **has not yet been achieved**. In this case, if ``adaptive`` is `True`, these points will be excluded from further iterations and, if ``adaptive`` is `False`, :py:meth:`all_world2pix` will automatically switch to the adaptive algorithm. Thus, the reported divergent solution will be the latest converging solution computed immediately *before* divergence has been detected. .. note:: When accuracy has been achieved, small increases in current corrections may be possible due to rounding errors (when ``adaptive`` is `False`) and such increases will be ignored. .. note:: Based on our testing using HST ACS/WFC images, setting ``detect_divergence`` to `True` will incur about a 5-20% performance penalty, with the larger penalty corresponding to ``adaptive`` set to `True`.
Because the benefits of enabling this feature outweigh the small performance penalty, especially when ``adaptive`` = `False`, it is recommended to set ``detect_divergence`` to `True`, unless extensive testing of the distortion models for images from specific instruments shows good stability of the numerical method for a wide range of coordinates (even outside the image itself). .. note:: Indices of the diverging inverse solutions will be reported in the ``divergent`` attribute of the raised :py:class:`NoConvergence` exception object. Returns ------- {2} Notes ----- The order of the axes for the input world array is determined by the ``CTYPEia`` keywords in the FITS header, therefore it may not always be of the form (*ra*, *dec*). The `~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`, `~astropy.wcs.Wcsprm.lattyp`, and `~astropy.wcs.Wcsprm.lngtyp` members can be used to determine the order of the axes. Using the method of fixed-point iterations, we iterate starting with the initial approximation, which is computed using the non-distortion-aware :py:meth:`wcs_world2pix` (or equivalent). The :py:meth:`all_world2pix` function uses a vectorized implementation of the method of consecutive approximations and therefore it is highly efficient (>30x) when *all* data points that need to be converted from sky coordinates to image coordinates are passed at *once*. Therefore, it is advisable, whenever possible, to pass to :py:meth:`all_world2pix` a single long array of all the points that need to be converted, instead of calling :py:meth:`all_world2pix` for each data point. Also see the note to the ``adaptive`` parameter. Raises ------ NoConvergence The method did not converge to a solution to the required accuracy within the maximum number of iterations set by the ``maxiter`` parameter. To turn off this exception, set ``quiet`` to `True`. Indices of the points for which the requested accuracy was not achieved (if any) will be listed in the ``slow_conv`` attribute of the raised :py:class:`NoConvergence` exception object. See :py:class:`NoConvergence` documentation for more details. MemoryError Memory allocation failed. SingularMatrixError Linear transformation matrix is singular. InconsistentAxisTypesError Inconsistent or unrecognized coordinate axis types. ValueError Invalid parameter value. ValueError Invalid coordinate transformation parameters. ValueError x- and y-coordinate arrays are not the same size. InvalidTransformError Invalid coordinate transformation parameters. InvalidTransformError Ill-conditioned coordinate transformation parameters.
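As a compact illustration of the inversion scheme described in the ``Notes`` section above, here is a minimal, non-adaptive sketch of the iteration (illustrative only; ``w``, ``world``, ``origin``, ``maxiter`` and ``tolerance`` are stand-ins for the inputs described above, and the actual implementation adds divergence detection, adaptive point selection, and per-point bookkeeping)::

            import numpy as np
            pix0 = w.wcs_world2pix(world, origin)  # distortion-free initial guess
            pix = pix0.copy()
            for _ in range(maxiter):
                # correction: distortion-induced shift of the current iterate
                dpix = w.pix2foc(pix, origin) - pix0
                pix = pix - dpix  # fixed-point update
                # stop once the squared L2 norm of every correction is small
                if np.max(np.sum(dpix * dpix, axis=1)) < tolerance ** 2:
                    break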
Examples -------- >>> import astropy.io.fits as fits >>> import astropy.wcs as wcs >>> import numpy as np >>> import os >>> filename = os.path.join(wcs.__path__[0], 'tests/data/j94f05bgq_flt.fits') >>> hdulist = fits.open(filename) >>> w = wcs.WCS(hdulist[('sci',1)].header, hdulist) >>> hdulist.close() >>> ra, dec = w.all_pix2world([1,2,3], [1,1,1], 1) >>> print(ra) # doctest: +FLOAT_CMP [ 5.52645627 5.52649663 5.52653698] >>> print(dec) # doctest: +FLOAT_CMP [-72.05171757 -72.05171276 -72.05170795] >>> radec = w.all_pix2world([[1,1], [2,1], [3,1]], 1) >>> print(radec) # doctest: +FLOAT_CMP [[ 5.52645627 -72.05171757] [ 5.52649663 -72.05171276] [ 5.52653698 -72.05170795]] >>> x, y = w.all_world2pix(ra, dec, 1) >>> print(x) # doctest: +FLOAT_CMP [ 1.00000238 2.00000237 3.00000236] >>> print(y) # doctest: +FLOAT_CMP [ 0.99999996 0.99999997 0.99999997] >>> xy = w.all_world2pix(radec, 1) >>> print(xy) # doctest: +FLOAT_CMP [[ 1.00000238 0.99999996] [ 2.00000237 0.99999997] [ 3.00000236 0.99999997]] >>> xy = w.all_world2pix(radec, 1, maxiter=3, ... tolerance=1.0e-10, quiet=False) Traceback (most recent call last): ... NoConvergence: 'WCS.all_world2pix' failed to converge to the requested accuracy. After 3 iterations, the solution is diverging at least for one input point. >>> # Now try to use some diverging data: >>> divradec = w.all_pix2world([[1.0, 1.0], ... [10000.0, 50000.0], ... [3.0, 1.0]], 1) >>> print(divradec) # doctest: +FLOAT_CMP [[ 5.52645627 -72.05171757] [ 7.15976932 -70.8140779 ] [ 5.52653698 -72.05170795]] >>> # First, turn detect_divergence on: >>> try: # doctest: +FLOAT_CMP ... xy = w.all_world2pix(divradec, 1, maxiter=20, ... tolerance=1.0e-4, adaptive=False, ... detect_divergence=True, ... quiet=False) ... except wcs.wcs.NoConvergence as e: ... print("Indices of diverging points: {{0}}" ... .format(e.divergent)) ... print("Indices of poorly converging points: {{0}}" ... .format(e.slow_conv)) ... print("Best solution:\\n{{0}}".format(e.best_solution)) ... print("Achieved accuracy:\\n{{0}}".format(e.accuracy)) Indices of diverging points: [1] Indices of poorly converging points: None Best solution: [[ 1.00000238e+00 9.99999965e-01] [ -1.99441636e+06 1.44309097e+06] [ 3.00000236e+00 9.99999966e-01]] Achieved accuracy: [[ 6.13968380e-05 8.59638593e-07] [ 8.59526812e+11 6.61713548e+11] [ 6.09398446e-05 8.38759724e-07]] >>> raise e Traceback (most recent call last): ... NoConvergence: 'WCS.all_world2pix' failed to converge to the requested accuracy. After 5 iterations, the solution is diverging at least for one input point. >>> # This time turn detect_divergence off: >>> try: # doctest: +FLOAT_CMP ... xy = w.all_world2pix(divradec, 1, maxiter=20, ... tolerance=1.0e-4, adaptive=False, ... detect_divergence=False, ... quiet=False) ... except wcs.wcs.NoConvergence as e: ... print("Indices of diverging points: {{0}}" ... .format(e.divergent)) ... print("Indices of poorly converging points: {{0}}" ... .format(e.slow_conv)) ... print("Best solution:\\n{{0}}".format(e.best_solution)) ... print("Achieved accuracy:\\n{{0}}".format(e.accuracy)) Indices of diverging points: [1] Indices of poorly converging points: None Best solution: [[ 1.00000009 1. ] [ nan nan] [ 3.00000009 1. ]] Achieved accuracy: [[ 2.29417358e-06 3.21222995e-08] [ nan nan] [ 2.27407877e-06 3.13005639e-08]] >>> raise e Traceback (most recent call last): ... NoConvergence: 'WCS.all_world2pix' failed to converge to the requested accuracy. After 6 iterations, the solution is diverging at least for one input point. 
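In production code it is often preferable to catch the exception and keep the best solution found so far; a hedged sketch of this pattern, using the attributes documented for :py:class:`NoConvergence` and the ``divradec`` array from the examples above::

            try:
                xy = w.all_world2pix(divradec, 1, detect_divergence=True)
            except wcs.wcs.NoConvergence as e:
                xy = e.best_solution    # latest iterate for every input point
                bad = e.divergent       # indices of diverging points, or None
                slow = e.slow_conv      # indices of slowly converging points, or None

Alternatively, passing ``quiet=True`` suppresses the exception and returns the best solution found directly.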
""".format(docstrings.TWO_OR_MORE_ARGS('naxis', 8), docstrings.RA_DEC_ORDER(8), docstrings.RETURNS('pixel coordinates', 8)) def wcs_world2pix(self, *args, **kwargs): if self.wcs is None: raise ValueError("No basic WCS settings were created.") return self._array_converter( lambda xy, o: self.wcs.s2p(xy, o)['pixcrd'], 'input', *args, **kwargs) wcs_world2pix.__doc__ = """ Transforms world coordinates to pixel coordinates, using only the basic `wcslib`_ WCS transformation. No `SIP`_ or `distortion paper`_ table lookup transformation is applied. Parameters ---------- {} For a transformation that is not two-dimensional, the two-argument form must be used. {} Returns ------- {} Notes ----- The order of the axes for the input world array is determined by the ``CTYPEia`` keywords in the FITS header, therefore it may not always be of the form (*ra*, *dec*). The `~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`, `~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp` members can be used to determine the order of the axes. Raises ------ MemoryError Memory allocation failed. SingularMatrixError Linear transformation matrix is singular. InconsistentAxisTypesError Inconsistent or unrecognized coordinate axis types. ValueError Invalid parameter value. ValueError Invalid coordinate transformation parameters. ValueError x- and y-coordinate arrays are not the same size. InvalidTransformError Invalid coordinate transformation parameters. InvalidTransformError Ill-conditioned coordinate transformation parameters. """.format(docstrings.TWO_OR_MORE_ARGS('naxis', 8), docstrings.RA_DEC_ORDER(8), docstrings.RETURNS('pixel coordinates', 8)) def pix2foc(self, *args): return self._array_converter(self._pix2foc, None, *args) pix2foc.__doc__ = """ Convert pixel coordinates to focal plane coordinates using the `SIP`_ polynomial distortion convention and `distortion paper`_ table-lookup correction. The output is in absolute pixel coordinates, not relative to ``CRPIX``. Parameters ---------- {} Returns ------- {} Raises ------ MemoryError Memory allocation failed. ValueError Invalid coordinate transformation parameters. """.format(docstrings.TWO_OR_MORE_ARGS('2', 8), docstrings.RETURNS('focal coordinates', 8)) def p4_pix2foc(self, *args): return self._array_converter(self._p4_pix2foc, None, *args) p4_pix2foc.__doc__ = """ Convert pixel coordinates to focal plane coordinates using `distortion paper`_ table-lookup correction. The output is in absolute pixel coordinates, not relative to ``CRPIX``. Parameters ---------- {} Returns ------- {} Raises ------ MemoryError Memory allocation failed. ValueError Invalid coordinate transformation parameters. """.format(docstrings.TWO_OR_MORE_ARGS('2', 8), docstrings.RETURNS('focal coordinates', 8)) def det2im(self, *args): return self._array_converter(self._det2im, None, *args) det2im.__doc__ = """ Convert detector coordinates to image plane coordinates using `distortion paper`_ table-lookup correction. The output is in absolute pixel coordinates, not relative to ``CRPIX``. Parameters ---------- {} Returns ------- {} Raises ------ MemoryError Memory allocation failed. ValueError Invalid coordinate transformation parameters. 
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8), docstrings.RETURNS('pixel coordinates', 8)) def sip_pix2foc(self, *args): if self.sip is None: if len(args) == 2: return args[0] elif len(args) == 3: return args[:2] else: raise TypeError("Wrong number of arguments") return self._array_converter(self.sip.pix2foc, None, *args) sip_pix2foc.__doc__ = """ Convert pixel coordinates to focal plane coordinates using the `SIP`_ polynomial distortion convention. The output is in pixel coordinates, relative to ``CRPIX``. FITS WCS `distortion paper`_ table lookup correction is not applied, even if that information existed in the FITS file that initialized this :class:`~astropy.wcs.WCS` object. To correct for that, use `~astropy.wcs.WCS.pix2foc` or `~astropy.wcs.WCS.p4_pix2foc`. Parameters ---------- {} Returns ------- {} Raises ------ MemoryError Memory allocation failed. ValueError Invalid coordinate transformation parameters. """.format(docstrings.TWO_OR_MORE_ARGS('2', 8), docstrings.RETURNS('focal coordinates', 8)) def sip_foc2pix(self, *args): if self.sip is None: if len(args) == 2: return args[0] elif len(args) == 3: return args[:2] else: raise TypeError("Wrong number of arguments") return self._array_converter(self.sip.foc2pix, None, *args) sip_foc2pix.__doc__ = """ Convert focal plane coordinates to pixel coordinates using the `SIP`_ polynomial distortion convention. FITS WCS `distortion paper`_ table lookup distortion correction is not applied, even if that information existed in the FITS file that initialized this `~astropy.wcs.WCS` object. Parameters ---------- {} Returns ------- {} Raises ------ MemoryError Memory allocation failed. ValueError Invalid coordinate transformation parameters. """.format(docstrings.TWO_OR_MORE_ARGS('2', 8), docstrings.RETURNS('pixel coordinates', 8)) def proj_plane_pixel_scales(self): """ Calculate pixel scales along each axis of the image pixel at the ``CRPIX`` location once it is projected onto the "plane of intermediate world coordinates" as defined in `Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_. .. note:: This method is concerned **only** about the transformation "image plane"->"projection plane" and **not** about the transformation "celestial sphere"->"projection plane"->"image plane". Therefore, this function ignores distortions arising due to non-linear nature of most projections. .. note:: This method only returns sensible answers if the WCS contains celestial axes, i.e., the `~astropy.wcs.WCS.celestial` WCS object. Returns ------- scale : list of `~astropy.units.Quantity` A vector of projection plane increments corresponding to each pixel side (axis). See Also -------- astropy.wcs.utils.proj_plane_pixel_scales """ # noqa: E501 from astropy.wcs.utils import proj_plane_pixel_scales # Avoid circular import values = proj_plane_pixel_scales(self) units = [u.Unit(x) for x in self.wcs.cunit] return [value * unit for (value, unit) in zip(values, units)] # Can have different units def proj_plane_pixel_area(self): """ For a **celestial** WCS (see `astropy.wcs.WCS.celestial`), returns pixel area of the image pixel at the ``CRPIX`` location once it is projected onto the "plane of intermediate world coordinates" as defined in `Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_. .. 
note:: This function is concerned **only** about the transformation "image plane"->"projection plane" and **not** about the transformation "celestial sphere"->"projection plane"->"image plane". Therefore, this function ignores distortions arising due to non-linear nature of most projections. .. note:: This method only returns sensible answers if the WCS contains celestial axes, i.e., the `~astropy.wcs.WCS.celestial` WCS object. Returns ------- area : `~astropy.units.Quantity` Area (in the projection plane) of the pixel at ``CRPIX`` location. Raises ------ ValueError Pixel area is defined only for 2D pixels. Most likely the `~astropy.wcs.Wcsprm.cd` matrix of the `~astropy.wcs.WCS.celestial` WCS is not a square matrix of second order. Notes ----- Depending on the application, square root of the pixel area can be used to represent a single pixel scale of an equivalent square pixel whose area is equal to the area of a generally non-square pixel. See Also -------- astropy.wcs.utils.proj_plane_pixel_area """ # noqa: E501 from astropy.wcs.utils import proj_plane_pixel_area # Avoid circular import value = proj_plane_pixel_area(self) unit = u.Unit(self.wcs.cunit[0]) * u.Unit(self.wcs.cunit[1]) # 2D only return value * unit def to_fits(self, relax=False, key=None): """ Generate an `~astropy.io.fits.HDUList` object with all of the information stored in this object. This should be logically identical to the input FITS file, but it will be normalized in a number of ways. See `to_header` for some warnings about the output produced. Parameters ---------- relax : bool or int, optional Degree of permissiveness: - `False` (default): Write all extensions that are considered to be safe and recommended. - `True`: Write all recognized informal extensions of the WCS standard. - `int`: a bit field selecting specific extensions to write. See :ref:`astropy:relaxwrite` for details. key : str The name of a particular WCS transform to use. This may be either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``"a"`` part of the ``CTYPEia`` cards. Returns ------- hdulist : `~astropy.io.fits.HDUList` """ header = self.to_header(relax=relax, key=key) hdu = fits.PrimaryHDU(header=header) hdulist = fits.HDUList(hdu) self._write_det2im(hdulist) self._write_distortion_kw(hdulist) return hdulist def to_header(self, relax=None, key=None): """Generate an `astropy.io.fits.Header` object with the basic WCS and SIP information stored in this object. This should be logically identical to the input FITS file, but it will be normalized in a number of ways. .. warning:: This function does not write out FITS WCS `distortion paper`_ information, since that requires multiple FITS header data units. To get a full representation of everything in this object, use `to_fits`. Parameters ---------- relax : bool or int, optional Degree of permissiveness: - `False` (default): Write all extensions that are considered to be safe and recommended. - `True`: Write all recognized informal extensions of the WCS standard. - `int`: a bit field selecting specific extensions to write. See :ref:`astropy:relaxwrite` for details. If the ``relax`` keyword argument is not given and any keywords were omitted from the output, an `~astropy.utils.exceptions.AstropyWarning` is displayed. To override this, explicitly pass a value to ``relax``. key : str The name of a particular WCS transform to use. This may be either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``"a"`` part of the ``CTYPEia`` cards. 
Returns ------- header : `astropy.io.fits.Header` Notes ----- The output header will almost certainly differ from the input in a number of respects: 1. The output header only contains WCS-related keywords. In particular, it does not contain syntactically-required keywords such as ``SIMPLE``, ``NAXIS``, ``BITPIX``, or ``END``. 2. Deprecated (e.g. ``CROTAn``) or non-standard usage will be translated to standard (this is partially dependent on whether ``fix`` was applied). 3. Quantities will be converted to the units used internally, basically SI with the addition of degrees. 4. Floating-point quantities may be given to a different decimal precision. 5. Elements of the ``PCi_j`` matrix will be written if and only if they differ from the unit matrix. Thus, if the matrix is unity then no elements will be written. 6. Additional keywords such as ``WCSAXES``, ``CUNITia``, ``LONPOLEa`` and ``LATPOLEa`` may appear. 7. The original keycomments will be lost, although `to_header` tries hard to write meaningful comments. 8. Keyword order may be changed. """ # default precision for numerical WCS keywords precision = WCSHDO_P14 # Defined by C-ext # noqa: F821 display_warning = False if relax is None: display_warning = True relax = False if relax not in (True, False): do_sip = relax & WCSHDO_SIP relax &= ~WCSHDO_SIP else: do_sip = relax relax = WCSHDO_all if relax is True else WCSHDO_safe # Defined by C-ext # noqa: F821 relax = precision | relax if self.wcs is not None: if key is not None: orig_key = self.wcs.alt self.wcs.alt = key header_string = self.wcs.to_header(relax) header = fits.Header.fromstring(header_string) keys_to_remove = ["", " ", "COMMENT"] for kw in keys_to_remove: if kw in header: del header[kw] # Check if we can handle TPD distortion correctly if _WCS_TPD_WARN_LT71: for kw, val in header.items(): if kw[:5] in ('CPDIS', 'CQDIS') and val == 'TPD': warnings.warn( f"WCS contains a TPD distortion model in {kw}. WCSLIB " f"{_wcs.__version__} is writing this in a format incompatible with " f"current versions - please update to 7.4 or use the bundled WCSLIB.", AstropyWarning) elif _WCS_TPD_WARN_LT74: for kw, val in header.items(): if kw[:5] in ('CPDIS', 'CQDIS') and val == 'TPD': warnings.warn( f"WCS contains a TPD distortion model in {kw}, which requires WCSLIB " f"7.4 or later to store in a FITS header (having {_wcs.__version__}).", AstropyWarning) else: header = fits.Header() if do_sip and self.sip is not None: if self.wcs is not None and any(not ctyp.endswith('-SIP') for ctyp in self.wcs.ctype): self._fix_ctype(header, add_sip=True) for kw, val in self._write_sip_kw().items(): header[kw] = val if not do_sip and self.wcs is not None and any(self.wcs.ctype) and self.sip is not None: # This is called when relax is not False or WCSHDO_SIP # The default case of ``relax=None`` is handled further in the code. header = self._fix_ctype(header, add_sip=False) if display_warning: full_header = self.to_header(relax=True, key=key) missing_keys = [] for kw, val in full_header.items(): if kw not in header: missing_keys.append(kw) if len(missing_keys): warnings.warn( "Some non-standard WCS keywords were excluded: {} " "Use the ``relax`` kwarg to control this.".format( ', '.join(missing_keys)), AstropyWarning) # called when ``relax=None`` # This is different from the case of ``relax=False``. if any(self.wcs.ctype) and self.sip is not None: header = self._fix_ctype(header, add_sip=False, log_message=False) # Finally reset the key. This must be called after ``_fix_ctype``. 
if key is not None: self.wcs.alt = orig_key return header def _fix_ctype(self, header, add_sip=True, log_message=True): """ Parameters ---------- header : `~astropy.io.fits.Header` FITS header. add_sip : bool Flag indicating whether "-SIP" should be added or removed from CTYPE keywords. Remove "-SIP" from CTYPE when writing out a header with relax=False. This needs to be done outside ``to_header`` because ``to_header`` runs twice when ``relax=False`` and the second time ``relax`` is set to ``True`` to display the missing keywords. If the user requested SIP distortion to be written out add "-SIP" to CTYPE if it is missing. """ _add_sip_to_ctype = """ Inconsistent SIP distortion information is present in the current WCS: SIP coefficients were detected, but CTYPE is missing "-SIP" suffix, therefore the current WCS is internally inconsistent. Because relax has been set to True, the resulting output WCS will have "-SIP" appended to CTYPE in order to make the header internally consistent. However, this may produce incorrect astrometry in the output WCS, if in fact the current WCS is already distortion-corrected. Therefore, if current WCS is already distortion-corrected (eg, drizzled) then SIP distortion components should not apply. In that case, for a WCS that is already distortion-corrected, please remove the SIP coefficients from the header. """ if log_message: if add_sip: log.info(_add_sip_to_ctype) for i in range(1, self.naxis+1): # strip() must be called here to cover the case of alt key= " " kw = f'CTYPE{i}{self.wcs.alt}'.strip() if kw in header: if add_sip: val = header[kw].strip("-SIP") + "-SIP" else: val = header[kw].strip("-SIP") header[kw] = val else: continue return header def to_header_string(self, relax=None): """ Identical to `to_header`, but returns a string containing the header cards. """ return str(self.to_header(relax)) def footprint_to_file(self, filename='footprint.reg', color='green', width=2, coordsys=None): """ Writes out a `ds9`_ style regions file. It can be loaded directly by `ds9`_. Parameters ---------- filename : str, optional Output file name - default is ``'footprint.reg'`` color : str, optional Color to use when plotting the line. width : int, optional Width of the region line. coordsys : str, optional Coordinate system. If not specified (default), the ``radesys`` value is used. For all possible values, see http://ds9.si.edu/doc/ref/region.html#RegionFileFormat """ comments = ('# Region file format: DS9 version 4.0 \n' '# global color=green font="helvetica 12 bold ' 'select=1 highlite=1 edit=1 move=1 delete=1 ' 'include=1 fixed=0 source\n') coordsys = coordsys or self.wcs.radesys if coordsys not in ('PHYSICAL', 'IMAGE', 'FK4', 'B1950', 'FK5', 'J2000', 'GALACTIC', 'ECLIPTIC', 'ICRS', 'LINEAR', 'AMPLIFIER', 'DETECTOR'): raise ValueError("Coordinate system '{}' is not supported. A valid" " one can be given with the 'coordsys' argument." 
.format(coordsys)) with open(filename, mode='w') as f: f.write(comments) f.write(f'{coordsys}\n') f.write('polygon(') ftpr = self.calc_footprint() if ftpr is not None: ftpr.tofile(f, sep=',') f.write(f') # color={color}, width={width:d} \n') def _get_naxis(self, header=None): _naxis = [] if (header is not None and not isinstance(header, (str, bytes))): for naxis in itertools.count(1): try: _naxis.append(header[f'NAXIS{naxis}']) except KeyError: break if len(_naxis) == 0: _naxis = [0, 0] elif len(_naxis) == 1: _naxis.append(0) self._naxis = _naxis def printwcs(self): print(repr(self)) def __repr__(self): ''' Return a short description. Simply porting the behavior from the `printwcs()` method. ''' description = ["WCS Keywords\n", f"Number of WCS axes: {self.naxis!r}"] sfmt = ' : ' + "".join(["{"+f"{i}"+"!r} " for i in range(self.naxis)]) keywords = ['CTYPE', 'CRVAL', 'CRPIX'] values = [self.wcs.ctype, self.wcs.crval, self.wcs.crpix] for keyword, value in zip(keywords, values): description.append(keyword+sfmt.format(*value)) if hasattr(self.wcs, 'pc'): for i in range(self.naxis): s = '' for j in range(self.naxis): s += ''.join(['PC', str(i+1), '_', str(j+1), ' ']) s += sfmt description.append(s.format(*self.wcs.pc[i])) s = 'CDELT' + sfmt description.append(s.format(*self.wcs.cdelt)) elif hasattr(self.wcs, 'cd'): for i in range(self.naxis): s = '' for j in range(self.naxis): s += "".join(['CD', str(i+1), '_', str(j+1), ' ']) s += sfmt description.append(s.format(*self.wcs.cd[i])) description.append(f"NAXIS : {' '.join(map(str, self._naxis))}") return '\n'.join(description) def get_axis_types(self): """ Similar to `self.wcsprm.axis_types <astropy.wcs.Wcsprm.axis_types>` but provides the information in a more Python-friendly format. Returns ------- result : list of dict Returns a list of dictionaries, one for each axis, each containing attributes about the type of that axis. Each dictionary has the following keys: - 'coordinate_type': - None: Non-specific coordinate type. - 'stokes': Stokes coordinate. - 'celestial': Celestial coordinate (including ``CUBEFACE``). - 'spectral': Spectral coordinate. - 'scale': - 'linear': Linear axis. - 'quantized': Quantized axis (``STOKES``, ``CUBEFACE``). - 'non-linear celestial': Non-linear celestial axis. - 'non-linear spectral': Non-linear spectral axis. - 'logarithmic': Logarithmic axis. - 'tabular': Tabular axis. - 'group' - Group number, e.g. lookup table number - 'number' - For celestial axes: - 0: Longitude coordinate. - 1: Latitude coordinate. - 2: ``CUBEFACE`` number. - For lookup tables: - the axis number in a multidimensional table. ``CTYPEia`` in ``"4-3"`` form with unrecognized algorithm code will generate an error. """ if self.wcs is None: raise AttributeError( "This WCS object does not have a wcsprm object.") coordinate_type_map = { 0: None, 1: 'stokes', 2: 'celestial', 3: 'spectral'} scale_map = { 0: 'linear', 1: 'quantized', 2: 'non-linear celestial', 3: 'non-linear spectral', 4: 'logarithmic', 5: 'tabular'} result = [] for axis_type in self.wcs.axis_types: subresult = {} coordinate_type = (axis_type // 1000) % 10 subresult['coordinate_type'] = coordinate_type_map[coordinate_type] scale = (axis_type // 100) % 10 subresult['scale'] = scale_map[scale] group = (axis_type // 10) % 10 subresult['group'] = group number = axis_type % 10 subresult['number'] = number result.append(subresult) return result def __reduce__(self): """ Support pickling of WCS objects. This is done by serializing to an in-memory FITS file and dumping that as a string. 
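A hedged round-trip sketch (``w`` is an existing `~astropy.wcs.WCS` instance; the serialized form is the FITS representation produced by ``to_fits(relax=True)``)::

            import pickle
            w2 = pickle.loads(pickle.dumps(w))  # reconstructed from the FITS bytes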
""" hdulist = self.to_fits(relax=True) buffer = io.BytesIO() hdulist.writeto(buffer) dct = self.__dict__.copy() dct['_alt_wcskey'] = self.wcs.alt return (__WCS_unpickle__, (self.__class__, dct, buffer.getvalue(),)) def dropaxis(self, dropax): """ Remove an axis from the WCS. Parameters ---------- wcs : `~astropy.wcs.WCS` The WCS with naxis to be chopped to naxis-1 dropax : int The index of the WCS to drop, counting from 0 (i.e., python convention, not FITS convention) Returns ------- `~astropy.wcs.WCS` A new `~astropy.wcs.WCS` instance with one axis fewer """ inds = list(range(self.wcs.naxis)) inds.pop(dropax) # axis 0 has special meaning to sub # if wcs.wcs.ctype == ['RA','DEC','VLSR'], you want # wcs.sub([1,2]) to get 'RA','DEC' back return self.sub([i+1 for i in inds]) def swapaxes(self, ax0, ax1): """ Swap axes in a WCS. Parameters ---------- wcs : `~astropy.wcs.WCS` The WCS to have its axes swapped ax0 : int ax1 : int The indices of the WCS to be swapped, counting from 0 (i.e., python convention, not FITS convention) Returns ------- `~astropy.wcs.WCS` A new `~astropy.wcs.WCS` instance with the same number of axes, but two swapped """ inds = list(range(self.wcs.naxis)) inds[ax0], inds[ax1] = inds[ax1], inds[ax0] return self.sub([i+1 for i in inds]) def reorient_celestial_first(self): """ Reorient the WCS such that the celestial axes are first, followed by the spectral axis, followed by any others. Assumes at least celestial axes are present. """ return self.sub([WCSSUB_CELESTIAL, WCSSUB_SPECTRAL, WCSSUB_STOKES, WCSSUB_TIME]) # Defined by C-ext # noqa: F821 E501 def slice(self, view, numpy_order=True): """ Slice a WCS instance using a Numpy slice. The order of the slice should be reversed (as for the data) compared to the natural WCS order. Parameters ---------- view : tuple A tuple containing the same number of slices as the WCS system. The ``step`` method, the third argument to a slice, is not presently supported. numpy_order : bool Use numpy order, i.e. slice the WCS so that an identical slice applied to a numpy array will slice the array and WCS in the same way. If set to `False`, the WCS will be sliced in FITS order, meaning the first slice will be applied to the *last* numpy index but the *first* WCS axis. Returns ------- wcs_new : `~astropy.wcs.WCS` A new resampled WCS axis """ if hasattr(view, '__len__') and len(view) > self.wcs.naxis: raise ValueError("Must have # of slices <= # of WCS axes") elif not hasattr(view, '__len__'): # view MUST be an iterable view = [view] if not all(isinstance(x, slice) for x in view): # We need to drop some dimensions, but this may not always be # possible with .sub due to correlated axes, so instead we use the # generalized slicing infrastructure from astropy.wcs.wcsapi. return SlicedFITSWCS(self, view) # NOTE: we could in principle use SlicedFITSWCS as above for all slicing, # but in the simple case where there are no axes dropped, we can just # create a full WCS object with updated WCS parameters which is faster # for this specific case and also backward-compatible. 
wcs_new = self.deepcopy() if wcs_new.sip is not None: sip_crpix = wcs_new.sip.crpix.tolist() for i, iview in enumerate(view): if iview.step is not None and iview.step < 0: raise NotImplementedError("Reversing an axis is not " "implemented.") if numpy_order: wcs_index = self.wcs.naxis - 1 - i else: wcs_index = i if iview.step is not None and iview.start is None: # Slice from "None" is equivalent to slice from 0 (but one # might want to downsample, so allow slices with # None,None,step or None,stop,step) iview = slice(0, iview.stop, iview.step) if iview.start is not None: if iview.step not in (None, 1): crpix = self.wcs.crpix[wcs_index] cdelt = self.wcs.cdelt[wcs_index] # equivalently (keep this comment so you can compare eqns): # wcs_new.wcs.crpix[wcs_index] = # (crpix - iview.start)*iview.step + 0.5 - iview.step/2. crp = ((crpix - iview.start - 1.)/iview.step + 0.5 + 1./iview.step/2.) wcs_new.wcs.crpix[wcs_index] = crp if wcs_new.sip is not None: sip_crpix[wcs_index] = crp wcs_new.wcs.cdelt[wcs_index] = cdelt * iview.step else: wcs_new.wcs.crpix[wcs_index] -= iview.start if wcs_new.sip is not None: sip_crpix[wcs_index] -= iview.start try: # range requires integers but the other attributes can also # handle arbitrary values, so this needs to be in a try/except. nitems = len(builtins.range(self._naxis[wcs_index])[iview]) except TypeError as exc: if 'indices must be integers' not in str(exc): raise warnings.warn("NAXIS{} attribute is not updated because at " "least one index ('{}') is not an integer." "".format(wcs_index, iview), AstropyUserWarning) else: wcs_new._naxis[wcs_index] = nitems if wcs_new.sip is not None: wcs_new.sip = Sip(self.sip.a, self.sip.b, self.sip.ap, self.sip.bp, sip_crpix) return wcs_new def __getitem__(self, item): # "getitem" is a shortcut for self.slice; it is very limited # there is no obvious and unambiguous interpretation of wcs[1,2,3] # We COULD allow wcs[1] to link to wcs.sub([2]) # (wcs[i] -> wcs.sub([i+1])) return self.slice(item) def __iter__(self): # Having __getitem__ makes Python think WCS is iterable. However, # Python first checks whether __iter__ is present, so we can raise an # exception here. raise TypeError(f"'{self.__class__.__name__}' object is not iterable") @property def axis_type_names(self): """ World names for each coordinate axis. Returns ------- list of str A list of names along each axis.
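An illustrative example (hypothetical values)::

            w.wcs.ctype = ['RA---TAN', 'DEC--TAN']  # no CNAME set
            w.axis_type_names                       # -> ['RA', 'DEC']

since each ``CTYPE`` with no corresponding ``CNAME`` is truncated at its first ``'-'``.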
""" names = list(self.wcs.cname) types = self.wcs.ctype for i in range(len(names)): if len(names[i]) > 0: continue names[i] = types[i].split('-')[0] return names @property def celestial(self): """ A copy of the current WCS with only the celestial axes included """ return self.sub([WCSSUB_CELESTIAL]) # Defined by C-ext # noqa: F821 @property def is_celestial(self): return self.has_celestial and self.naxis == 2 @property def has_celestial(self): try: return self.wcs.lng >= 0 and self.wcs.lat >= 0 except InconsistentAxisTypesError: return False @property def spectral(self): """ A copy of the current WCS with only the spectral axes included """ return self.sub([WCSSUB_SPECTRAL]) # Defined by C-ext # noqa: F821 @property def is_spectral(self): return self.has_spectral and self.naxis == 1 @property def has_spectral(self): try: return self.wcs.spec >= 0 except InconsistentAxisTypesError: return False @property def temporal(self): """ A copy of the current WCS with only the time axes included """ if not _WCSSUB_TIME_SUPPORT: raise NotImplementedError( "Support for 'temporal' axis requires WCSLIB version 7.8 or " f"greater but linked WCSLIB version is {_wcs.__version__}" ) return self.sub([WCSSUB_TIME]) # Defined by C-ext # noqa: F821 @property def is_temporal(self): return self.has_temporal and self.naxis == 1 @property def has_temporal(self): return any(t // 1000 == 4 for t in self.wcs.axis_types) @property def has_distortion(self): """ Returns `True` if any distortion terms are present. """ return (self.sip is not None or self.cpdis1 is not None or self.cpdis2 is not None or self.det2im1 is not None and self.det2im2 is not None) @property def pixel_scale_matrix(self): try: cdelt = np.diag(self.wcs.get_cdelt()) pc = self.wcs.get_pc() except InconsistentAxisTypesError: try: # for non-celestial axes, get_cdelt doesn't work with warnings.catch_warnings(): warnings.filterwarnings( 'ignore', 'cdelt will be ignored since cd is present', RuntimeWarning) cdelt = np.dot(self.wcs.cd, np.diag(self.wcs.cdelt)) except AttributeError: cdelt = np.diag(self.wcs.cdelt) try: pc = self.wcs.pc except AttributeError: pc = 1 pccd = np.dot(cdelt, pc) return pccd def footprint_contains(self, coord, **kwargs): """ Determines if a given SkyCoord is contained in the wcs footprint. Parameters ---------- coord : `~astropy.coordinates.SkyCoord` The coordinate to check if it is within the wcs coordinate. **kwargs : Additional arguments to pass to `~astropy.coordinates.SkyCoord.to_pixel` Returns ------- response : bool True means the WCS footprint contains the coordinate, False means it does not. """ return coord.contained_by(self, **kwargs) def __WCS_unpickle__(cls, dct, fits_data): """ Unpickles a WCS object from a serialized FITS string. """ self = cls.__new__(cls) buffer = io.BytesIO(fits_data) hdulist = fits.open(buffer) naxis = dct.pop('naxis', None) if naxis: hdulist[0].header['naxis'] = naxis naxes = dct.pop('_naxis', []) for k, na in enumerate(naxes): hdulist[0].header[f'naxis{k + 1:d}'] = na kwargs = dct.pop('_init_kwargs', {}) self.__dict__.update(dct) wcskey = dct.pop('_alt_wcskey', ' ') WCS.__init__(self, hdulist[0].header, hdulist, key=wcskey, **kwargs) self.pixel_bounds = dct.get('_pixel_bounds', None) return self def find_all_wcs(header, relax=True, keysel=None, fix=True, translate_units='', _do_set=True): """ Find all the WCS transformations in the given header. Parameters ---------- header : str or `~astropy.io.fits.Header` object. 
relax : bool or int, optional Degree of permissiveness: - `True` (default): Admit all recognized informal extensions of the WCS standard. - `False`: Recognize only FITS keywords defined by the published WCS standard. - `int`: a bit field selecting specific extensions to accept. See :ref:`astropy:relaxread` for details. keysel : sequence of str, optional A list of flags used to select the keyword types considered by wcslib. When ``None``, only the standard image header keywords are considered (and the underlying wcspih() C function is called). To use binary table image array or pixel list keywords, *keysel* must be set. Each element in the list should be one of the following strings: - 'image': Image header keywords - 'binary': Binary table image array keywords - 'pixel': Pixel list keywords Keywords such as ``EQUIna`` or ``RFRQna`` that are common to binary table image arrays and pixel lists (including ``WCSNna`` and ``TWCSna``) are selected by both 'binary' and 'pixel'. fix : bool, optional When `True` (default), call `~astropy.wcs.Wcsprm.fix` on the resulting objects to fix any non-standard uses in the header. `FITSFixedWarning` warnings will be emitted if any changes were made. translate_units : str, optional Specify which potentially unsafe translations of non-standard unit strings to perform. By default, performs none. See `WCS.fix` for more information about this parameter. Only effective when ``fix`` is `True`. Returns ------- wcses : list of `WCS` """ if isinstance(header, (str, bytes)): header_string = header elif isinstance(header, fits.Header): header_string = header.tostring() else: raise TypeError( "header must be a string or astropy.io.fits.Header object") keysel_flags = _parse_keysel(keysel) if isinstance(header_string, str): header_bytes = header_string.encode('ascii') else: header_bytes = header_string wcsprms = _wcs.find_all_wcs(header_bytes, relax, keysel_flags) result = [] for wcsprm in wcsprms: subresult = WCS(fix=False, _do_set=False) subresult.wcs = wcsprm result.append(subresult) if fix: subresult.fix(translate_units) if _do_set: subresult.wcs.set() return result def validate(source): """ Prints a WCS validation report for the given FITS file. Parameters ---------- source : str or file-like or `~astropy.io.fits.HDUList` The FITS file to validate. Returns ------- results : list subclass instance The result is returned as nested lists. The first level corresponds to the HDUs in the given file. The next level has an entry for each WCS found in that header. The special subclass of list will pretty-print the results as a table when printed. 
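A minimal usage sketch (filename hypothetical)::

            from astropy.wcs import validate
            results = validate('image.fits')  # str, file-like, or HDUList
            print(results)                    # pretty-printed per-HDU report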
""" class _WcsValidateWcsResult(list): def __init__(self, key): self._key = key def __repr__(self): result = [f" WCS key '{self._key or ' '}':"] if len(self): for entry in self: for i, line in enumerate(entry.splitlines()): if i == 0: initial_indent = ' - ' else: initial_indent = ' ' result.extend( textwrap.wrap( line, initial_indent=initial_indent, subsequent_indent=' ')) else: result.append(" No issues.") return '\n'.join(result) class _WcsValidateHduResult(list): def __init__(self, hdu_index, hdu_name): self._hdu_index = hdu_index self._hdu_name = hdu_name list.__init__(self) def __repr__(self): if len(self): if self._hdu_name: hdu_name = f' ({self._hdu_name})' else: hdu_name = '' result = [f'HDU {self._hdu_index}{hdu_name}:'] for wcs in self: result.append(repr(wcs)) return '\n'.join(result) return '' class _WcsValidateResults(list): def __repr__(self): result = [] for hdu in self: content = repr(hdu) if len(content): result.append(content) return '\n\n'.join(result) global __warningregistry__ if isinstance(source, fits.HDUList): hdulist = source else: hdulist = fits.open(source) results = _WcsValidateResults() for i, hdu in enumerate(hdulist): hdu_results = _WcsValidateHduResult(i, hdu.name) results.append(hdu_results) with warnings.catch_warnings(record=True) as warning_lines: wcses = find_all_wcs( hdu.header, relax=_wcs.WCSHDR_reject, fix=False, _do_set=False) for wcs in wcses: wcs_results = _WcsValidateWcsResult(wcs.wcs.alt) hdu_results.append(wcs_results) try: del __warningregistry__ except NameError: pass with warnings.catch_warnings(record=True) as warning_lines: warnings.resetwarnings() warnings.simplefilter( "always", FITSFixedWarning, append=True) try: WCS(hdu.header, key=wcs.wcs.alt or ' ', relax=_wcs.WCSHDR_reject, fix=True, _do_set=False) except WcsError as e: wcs_results.append(str(e)) wcs_results.extend([str(x.message) for x in warning_lines]) return results
871a5566c23da0af1aa0d0eb4f874b047012db791ce3988e3c33b865d680f82a
# Licensed under a 3-clause BSD style license - see LICENSE.rst # It gets to be really tedious to type long docstrings in ANSI C # syntax (since multi-line string literals are not valid). # Therefore, the docstrings are written here in doc/docstrings.py, # which are then converted by setup.py into docstrings.h, which is # included by pywcs.c __all__ = ['TWO_OR_MORE_ARGS', 'RETURNS', 'ORIGIN', 'RA_DEC_ORDER'] def _fix(content, indent=0): lines = content.split('\n') indent = '\n' + ' ' * indent return indent.join(lines) def TWO_OR_MORE_ARGS(naxis, indent=0): return _fix( f"""*args There are two accepted forms for the positional arguments: - 2 arguments: An *N* x *{naxis}* array of coordinates, and an *origin*. - more than 2 arguments: An array for each axis, followed by an *origin*. These arrays must be broadcastable to one another. Here, *origin* is the coordinate in the upper left corner of the image. In FITS and Fortran standards, this is 1. In Numpy and C standards this is 0. """, indent) def RETURNS(out_type, indent=0): return _fix(f"""result : array Returns the {out_type}. If the input was a single array and origin, a single array is returned, otherwise a tuple of arrays is returned.""", indent) def ORIGIN(indent=0): return _fix( """ origin : int Specifies the origin of pixel values. The Fortran and FITS standards use an origin of 1. Numpy and C use array indexing with origin at 0. """, indent) def RA_DEC_ORDER(indent=0): return _fix( """ ra_dec_order : bool, optional When `True` will ensure that world coordinates are always given and returned as (*ra*, *dec*) pairs, regardless of the order of the axes specified in the ``CTYPE`` keywords. Default is `False`. """, indent) a = """ ``double array[a_order+1][a_order+1]`` Focal plane transformation matrix. The `SIP`_ ``A_i_j`` matrix used for pixel to focal plane transformation. Its values may be changed in place, but it may not be resized, without creating a new `~astropy.wcs.Sip` object. """ a_order = """ ``int`` (read-only) Order of the polynomial (``A_ORDER``). """ all_pix2world = """ all_pix2world(pixcrd, origin) -> ``double array[ncoord][nelem]`` Transforms pixel coordinates to world coordinates. Does the following: - Detector to image plane correction (if present) - SIP distortion correction (if present) - FITS WCS distortion correction (if present) - wcslib "core" WCS transformation The first three (the distortion corrections) are done in parallel. Parameters ---------- pixcrd : ndarray Array of pixel coordinates as ``double array[ncoord][nelem]``. {} Returns ------- world : ndarray Returns an array of world coordinates as ``double array[ncoord][nelem]``. Raises ------ MemoryError Memory allocation failed. SingularMatrixError Linear transformation matrix is singular. InconsistentAxisTypesError Inconsistent or unrecognized coordinate axis types. ValueError Invalid parameter value. ValueError Invalid coordinate transformation parameters. ValueError x- and y-coordinate arrays are not the same size. InvalidTransformError Invalid coordinate transformation. InvalidTransformError Ill-conditioned coordinate transformation parameters. """.format(ORIGIN()) alt = """ ``str`` Character code for alternate coordinate descriptions. For example, the ``"a"`` in keyword names such as ``CTYPEia``. This is a space character for the primary coordinate description, or one of the 26 upper-case letters, A-Z. """ ap = """ ``double array[ap_order+1][ap_order+1]`` Focal plane to pixel transformation matrix.
The `SIP`_ ``AP_i_j`` matrix used for focal plane to pixel transformation. Its values may be changed in place, but it may not be resized, without creating a new `~astropy.wcs.Sip` object. """ ap_order = """ ``int`` (read-only) Order of the polynomial (``AP_ORDER``). """ cel = """ `~astropy.wcs.Celprm` Information required to transform celestial coordinates. """ Celprm = """ Class that contains information required to transform celestial coordinates. It consists of certain members that must be set by the user (given) and others that are set by the WCSLIB routines (returned). Some of the latter are supplied for informational purposes and others are for internal use only. """ Prjprm = """ Class that contains information needed to project or deproject native spherical coordinates. It consists of certain members that must be set by the user (given) and others that are set by the WCSLIB routines (returned). Some of the latter are supplied for informational purposes and others are for internal use only. """ aux = """ `~astropy.wcs.Auxprm` Auxiliary coordinate system information of a specialist nature. """ Auxprm = """ Class that contains auxiliary coordinate system information of a specialist nature. This class can not be constructed directly from Python, but instead is returned from `~astropy.wcs.Wcsprm.aux`. """ axis_types = """ ``int array[naxis]`` An array of four-digit type codes for each axis. - First digit (i.e. 1000s): - 0: Non-specific coordinate type. - 1: Stokes coordinate. - 2: Celestial coordinate (including ``CUBEFACE``). - 3: Spectral coordinate. - Second digit (i.e. 100s): - 0: Linear axis. - 1: Quantized axis (``STOKES``, ``CUBEFACE``). - 2: Non-linear celestial axis. - 3: Non-linear spectral axis. - 4: Logarithmic axis. - 5: Tabular axis. - Third digit (i.e. 10s): - 0: Group number, e.g. lookup table number - The fourth digit is used as a qualifier depending on the axis type. - For celestial axes: - 0: Longitude coordinate. - 1: Latitude coordinate. - 2: ``CUBEFACE`` number. - For lookup tables: the axis number in a multidimensional table. ``CTYPEia`` in ``"4-3"`` form with unrecognized algorithm code will have its type set to -1 and generate an error. """ b = """ ``double array[b_order+1][b_order+1]`` Pixel to focal plane transformation matrix. The `SIP`_ ``B_i_j`` matrix used for pixel to focal plane transformation. Its values may be changed in place, but it may not be resized, without creating a new `~astropy.wcs.Sip` object. """ b_order = """ ``int`` (read-only) Order of the polynomial (``B_ORDER``). """ bounds_check = """ bounds_check(pix2world, world2pix) Enable/disable bounds checking. Parameters ---------- pix2world : bool, optional When `True`, enable bounds checking for the pixel-to-world (p2x) transformations. Default is `True`. world2pix : bool, optional When `True`, enable bounds checking for the world-to-pixel (s2x) transformations. Default is `True`. Notes ----- Note that by default (without calling `bounds_check`) strict bounds checking is enabled. """ bp = """ ``double array[bp_order+1][bp_order+1]`` Focal plane to pixel transformation matrix. The `SIP`_ ``BP_i_j`` matrix used for focal plane to pixel transformation. Its values may be changed in place, but it may not be resized, without creating a new `~astropy.wcs.Sip` object. """ bp_order = """ ``int`` (read-only) Order of the polynomial (``BP_ORDER``). """ cd = """ ``double array[naxis][naxis]`` The ``CDi_ja`` linear transformation matrix. 
For historical compatibility, three alternate specifications of the linear transformations are available in wcslib: the canonical ``PCi_ja`` with ``CDELTia``, the ``CDi_ja`` matrix, and the deprecated ``CROTAia`` keywords. Although the latter may not formally co-exist with ``PCi_ja``, the approach here is simply to ignore them if given in conjunction with ``PCi_ja``. `~astropy.wcs.Wcsprm.has_pc`, `~astropy.wcs.Wcsprm.has_cd` and `~astropy.wcs.Wcsprm.has_crota` can be used to determine which of these alternatives are present in the header. These alternate specifications of the linear transformation matrix are translated immediately to ``PCi_ja`` by `~astropy.wcs.Wcsprm.set` and are nowhere visible to the lower-level routines. In particular, `~astropy.wcs.Wcsprm.set` resets `~astropy.wcs.Wcsprm.cdelt` to unity if ``CDi_ja`` is present (and no ``PCi_ja``). If no ``CROTAia`` is associated with the latitude axis, `~astropy.wcs.Wcsprm.set` reverts to a unity ``PCi_ja`` matrix. """ cdelt = """ ``double array[naxis]`` Coordinate increments (``CDELTia``) for each coordinate axis. If a ``CDi_ja`` linear transformation matrix is present, a warning is raised and `~astropy.wcs.Wcsprm.cdelt` is ignored. The ``CDi_ja`` matrix may be deleted by:: del wcs.wcs.cd An undefined value is represented by NaN. """ cdfix = """ cdfix() Fix erroneously omitted ``CDi_ja`` keywords. Sets the diagonal element of the ``CDi_ja`` matrix to unity if all ``CDi_ja`` keywords associated with a given axis were omitted. According to Paper I, if any ``CDi_ja`` keywords at all are given in a FITS header then those not given default to zero. This results in a singular matrix with an intersecting row and column of zeros. Returns ------- success : int Returns ``0`` for success; ``-1`` if no change required. """ cel_offset = """ ``boolean`` Is there an offset? If `True`, an offset will be applied to ``(x, y)`` to force ``(x, y) = (0, 0)`` at the fiducial point, (phi_0, theta_0). Default is `False`. """ celprm_phi0 = r""" `float`, `None`. The native longitude, :math:`\phi_0`, in degrees of the fiducial point, i.e., the point whose celestial coordinates are given in ``Celprm.ref[0:1]``. If `None` or ``nan``, the initialization routine, ``celset()``, will set this to a projection-specific default. """ celprm_theta0 = r""" `float`, `None`. The native latitude, :math:`\theta_0`, in degrees of the fiducial point, i.e. the point whose celestial coordinates are given in ``Celprm.ref[0:1]``. If `None` or ``nan``, the initialization routine, ``celset()``, will set this to a projection-specific default. """ celprm_ref = """ ``numpy.ndarray`` with 4 elements. (Given) The first pair of values should be set to the celestial longitude and latitude of the fiducial point in degrees - typically right ascension and declination. These are given by the ``CRVALia`` keywords in ``FITS``. (Given and returned) The second pair of values are the native longitude, ``phi_p`` (in degrees), and latitude, ``theta_p`` (in degrees), of the celestial pole (the latter is the same as the celestial latitude of the native pole, ``delta_p``) and these are given by the ``FITS`` keywords ``LONPOLEa`` and ``LATPOLEa`` (or by ``PVi_2a`` and ``PVi_3a`` attached to the longitude axis which take precedence if defined). ``LONPOLEa`` defaults to ``phi0`` if the celestial latitude of the fiducial point of the projection is greater than or equal to the native latitude, otherwise ``phi0 + 180`` (degrees).
(This is the condition for the celestial latitude to increase in the same direction as the native latitude at the fiducial point.) ``ref[2]`` may be set to `None` or ``numpy.nan`` or 999.0 to indicate that the correct default should be substituted. ``theta_p``, the native latitude of the celestial pole (or equally the celestial latitude of the native pole, ``delta_p``) is often determined uniquely by ``CRVALia`` and ``LONPOLEa`` in which case ``LATPOLEa`` is ignored. However, in some circumstances there are two valid solutions for ``theta_p`` and ``LATPOLEa`` is used to choose between them. ``LATPOLEa`` is set in ``ref[3]`` and the solution closest to this value is used to reset ``ref[3]``. It is therefore legitimate, for example, to set ``ref[3]`` to ``+90.0`` to choose the more northerly solution - the default if the ``LATPOLEa`` keyword is omitted from the ``FITS`` header. For the special case where the fiducial point of the projection is at native latitude zero, its celestial latitude is zero, and ``LONPOLEa`` = ``+/- 90.0`` then the celestial latitude of the native pole is not determined by the first three reference values and ``LATPOLEa`` specifies it completely. The returned value, celprm.latpreq, specifies how ``LATPOLEa`` was actually used.""" celprm_euler = """ *Read-only* ``numpy.ndarray`` with 5 elements. Euler angles and associated intermediaries derived from the coordinate reference values. The first three values are the ``Z-``, ``X-``, and ``Z``-Euler angles in degrees, and the remaining two are the cosine and sine of the ``X``-Euler angle. """ celprm_latpreq = """ ``int``, *read-only*. For informational purposes, this indicates how the ``LATPOLEa`` keyword was used: - 0: Not required, ``theta_p == delta_p`` was determined uniquely by the ``CRVALia`` and ``LONPOLEa`` keywords. - 1: Required to select between two valid solutions of ``theta_p``. - 2: ``theta_p`` was specified solely by ``LATPOLEa``. """ celprm_isolat = """ ``bool``, *read-only*. True if the spherical rotation preserves the magnitude of the latitude, which occurs if the axes of the native and celestial coordinates are coincident. It signals an opportunity to cache intermediate calculations common to all elements in a vector computation. """ celprm_prj = """ *Read-only* Celestial transformation parameters. Some members of `Prjprm` are read-write, i.e., can be set by the user. For more details, see documentation for `Prjprm`. """ prjprm_r0 = r""" The radius of the generating sphere for the projection, a linear scaling parameter. If this is zero, it will be reset to its default value of :math:`180^\circ/\pi` (the value for FITS WCS). """ prjprm_code = """ Three-letter projection code defined by the FITS standard. """ prjprm_pv = """ Projection parameters. These correspond to the ``PVi_ma`` keywords in FITS, so ``pv[0]`` is ``PVi_0a``, ``pv[1]`` is ``PVi_1a``, etc., where ``i`` denotes the latitude-like axis. Many projections use ``pv[1]`` (``PVi_1a``), some also use ``pv[2]`` (``PVi_2a``) and ``SZP`` uses ``pv[3]`` (``PVi_3a``). ``ZPN`` is currently the only projection that uses any of the others. When setting ``pv`` values using lists or ``numpy.ndarray``, elements set to `None` will be left unchanged while those set to ``numpy.nan`` will be set to ``WCSLIB``'s ``UNDEFINED`` special value. For efficiency purposes, if supplied list or ``numpy.ndarray`` is shorter than the length of the ``pv`` member, then remaining values in ``pv`` will be left unchanged. .. 
note:: When retrieving ``pv``, a copy of the ``prjprm.pv`` array is returned. Modifying the values of this array will not modify the underlying ``WCSLIB`` ``prjprm.pv`` data. """ prjprm_pvi = """ Set/Get projection parameters for a specific index. These correspond to the ``PVi_ma`` keywords in FITS, so ``pv[0]`` is ``PVi_0a``, ``pv[1]`` is ``PVi_1a``, etc., where ``i`` denotes the latitude-like axis. Many projections use ``pv[1]`` (``PVi_1a``), some also use ``pv[2]`` (``PVi_2a``) and ``SZP`` uses ``pv[3]`` (``PVi_3a``). ``ZPN`` is currently the only projection that uses any of the others. Setting a ``pvi`` value to `None` will reset the corresponding ``WCSLIB`` ``prjprm.pv`` element to the default value as set by ``WCSLIB``'s ``prjini()``. Setting a ``pvi`` value to ``numpy.nan`` will set the corresponding ``WCSLIB`` ``prjprm.pv`` element to ``WCSLIB``'s ``UNDEFINED`` special value. """ prjprm_phi0 = r""" The native longitude, :math:`\phi_0` (in degrees) of the reference point, i.e. the point ``(x,y) = (0,0)``. If undefined the initialization routine will set this to a projection-specific default. """ prjprm_theta0 = r""" The native latitude, :math:`\theta_0` (in degrees) of the reference point, i.e. the point ``(x,y) = (0,0)``. If undefined the initialization routine will set this to a projection-specific default. """ prjprm_bounds = """ Controls bounds checking. If ``bounds&1`` then enable strict bounds checking for the spherical-to-Cartesian (``s2x``) transformation for the ``AZP``, ``SZP``, ``TAN``, ``SIN``, ``ZPN``, and ``COP`` projections. If ``bounds&2`` then enable strict bounds checking for the Cartesian-to-spherical transformation (``x2s``) for the ``HPX`` and ``XPH`` projections. If ``bounds&4`` then the Cartesian-to-spherical transformations (``x2s``) will invoke WCSLIB's ``prjbchk()`` to perform bounds checking on the computed native coordinates, with a tolerance set to suit each projection. ``bounds`` is set to 7 by default during initialization, which enables all checks. Zero it to disable all checking. It is not necessary to reset the ``Prjprm`` struct (via ``Prjprm.set()``) when ``bounds`` is changed. """ prjprm_name = """ *Read-only.* Long name of the projection. """ prjprm_category = """ *Read-only.* Projection category matching the value of the relevant ``wcs`` module constants: PRJ_ZENITHAL, PRJ_CYLINDRICAL, PRJ_PSEUDOCYLINDRICAL, PRJ_CONVENTIONAL, PRJ_CONIC, PRJ_POLYCONIC, PRJ_QUADCUBE, and PRJ_HEALPIX. """ prjprm_w = """ *Read-only.* Intermediate floating-point values derived from the projection parameters, cached here to save recomputation. .. note:: When retrieving ``w``, a copy of the ``prjprm.w`` array is returned. Modifying the values of this array will not modify the underlying ``WCSLIB`` ``prjprm.w`` data. """ prjprm_pvrange = """ *Read-only.* Range of projection parameter indices: 100 times the first allowed index plus the number of parameters, e.g. ``TAN`` is 0 (no parameters), ``SZP`` is 103 (1 to 3), and ``ZPN`` is 30 (0 to 29). """ prjprm_simplezen = """ *Read-only.* True if the projection is a radially-symmetric zenithal projection. """ prjprm_equiareal = """ *Read-only.* True if the projection is equal area. """ prjprm_conformal = """ *Read-only.* True if the projection is conformal. """ prjprm_global_projection = """ *Read-only.* True if the projection can represent the whole sphere in a finite, non-overlapped mapping. """ prjprm_divergent = """ *Read-only.* True if the projection diverges in latitude.
""" prjprm_x0 = r""" *Read-only.* The offset in ``x`` used to force :math:`(x,y) = (0,0)` at :math:`(\phi_0, \theta_0)`. """ prjprm_y0 = r""" *Read-only.* The offset in ``y`` used to force :math:`(x,y) = (0,0)` at :math:`(\phi_0, \theta_0)`. """ prjprm_m = """ *Read-only.* Intermediate integer value (used only for the ``ZPN`` and ``HPX`` projections). """ prjprm_n = """ *Read-only.* Intermediate integer value (used only for the ``ZPN`` and ``HPX`` projections). """ prjprm_set = """ This method sets up a ``Prjprm`` object according to information supplied within it. Note that this routine need not be called directly; it will be invoked by `prjx2s` and `prjs2x` if ``Prjprm.flag`` is anything other than a predefined magic value. The one important property of ``set()`` is that the projection code must be defined in the ``Prjprm`` in order for ``set()`` to identify the required projection. Raises ------ MemoryError Null ``prjprm`` pointer passed to WCSLIB routines. InvalidPrjParametersError Invalid projection parameters. InvalidCoordinateError One or more of the ``(x,y)`` or ``(lon,lat)`` coordinates were invalid. """ prjprm_prjx2s = r""" Deproject Cartesian ``(x,y)`` coordinates in the plane of projection to native spherical coordinates :math:`(\phi,\theta)`. The projection is that specified by ``Prjprm.code``. Parameters ---------- x, y : numpy.ndarray Arrays corresponding to the first (``x``) and second (``y``) projected coordinates. Returns ------- phi, theta : tuple of numpy.ndarray Longitude and latitude :math:`(\phi,\theta)` of the projected point in native spherical coordinates (in degrees). Values corresponding to invalid ``(x,y)`` coordinates are set to ``numpy.nan``. Raises ------ MemoryError Null ``prjprm`` pointer passed to WCSLIB routines. InvalidPrjParametersError Invalid projection parameters. """ prjprm_prjs2x = r""" Project native spherical coordinates :math:`(\phi,\theta)` to Cartesian ``(x,y)`` coordinates in the plane of projection. The projection is that specified by ``Prjprm.code``. Parameters ---------- phi : numpy.ndarray Array corresponding to the longitude :math:`\phi` of the projected point in native spherical coordinates (in degrees). theta : numpy.ndarray Array corresponding to the longitude :math:`\theta` of the projected point in native spherical coordinatess (in degrees). Values corresponding to invalid :math:`(\phi, \theta)` coordinates are set to ``numpy.nan``. Returns ------- x, y : tuple of numpy.ndarray Projected coordinates. Raises ------ MemoryError Null ``prjprm`` pointer passed to WCSLIB routines. InvalidPrjParametersError Invalid projection parameters. """ celfix = """ Translates AIPS-convention celestial projection types, ``-NCP`` and ``-GLS``. Returns ------- success : int Returns ``0`` for success; ``-1`` if no change required. """ cname = """ ``list of strings`` A list of the coordinate axis names, from ``CNAMEia``. """ colax = """ ``int array[naxis]`` An array recording the column numbers for each axis in a pixel list. """ colnum = """ ``int`` Column of FITS binary table associated with this WCS. Where the coordinate representation is associated with an image-array column in a FITS binary table, this property may be used to record the relevant column number. It should be set to zero for an image header or pixel list. """ compare = """ compare(other, cmp=0, tolerance=0.0) Compare two Wcsprm objects for equality. Parameters ---------- other : Wcsprm The other Wcsprm object to compare to. 
cmp : int, optional A bit field controlling the strictness of the comparison. When 0 (the default), all fields must be identical. The following constants, defined in the `astropy.wcs` module, may be or'ed together to loosen the comparison. - ``WCSCOMPARE_ANCILLARY``: Ignores ancillary keywords that don't change the WCS transformation, such as ``XPOSURE`` or ``EQUINOX``. Note that this also ignores ``DATE-OBS``, which does change the WCS transformation in some cases. - ``WCSCOMPARE_TILING``: Ignore integral differences in ``CRPIXja``. This is the 'tiling' condition, where two WCSes cover different regions of the same map projection and align on the same map grid. - ``WCSCOMPARE_CRPIX``: Ignore any differences at all in ``CRPIXja``. The two WCSes cover different regions of the same map projection but may not be aligned on the same map grid. Overrides ``WCSCOMPARE_TILING``. tolerance : float, optional The amount of tolerance required. For example, for a value of 1e-6, all floating-point values in the objects must be equal to the first 6 decimal places. The default value of 0.0 implies exact equality. Returns ------- equal : bool """ convert = """ convert(array) Perform the unit conversion on the elements of the given *array*, returning an array of the same shape. """ coord = """ ``double array[K_M]...[K_2][K_1][M]`` The tabular coordinate array. Has the dimensions:: (K_M, ... K_2, K_1, M) (see `~astropy.wcs.Tabprm.K`) i.e. with the `M` dimension varying fastest so that the `M` elements of a coordinate vector are stored contiguously in memory. """ copy = """ Creates a deep copy of the WCS object. """ cpdis1 = """ `~astropy.wcs.DistortionLookupTable` The pre-linear transformation distortion lookup table, ``CPDIS1``. """ cpdis2 = """ `~astropy.wcs.DistortionLookupTable` The pre-linear transformation distortion lookup table, ``CPDIS2``. """ crder = """ ``double array[naxis]`` The random error in each coordinate axis, ``CRDERia``. An undefined value is represented by NaN. """ crln_obs = """ ``double`` Carrington heliographic longitude of the observer (deg). If undefined, this is set to `None`. """ crota = """ ``double array[naxis]`` ``CROTAia`` keyvalues for each coordinate axis. For historical compatibility, three alternate specifications of the linear transformations are available in wcslib. The canonical ``PCi_ja`` with ``CDELTia``, ``CDi_ja``, and the deprecated ``CROTAia`` keywords. Although the latter may not formally co-exist with ``PCi_ja``, the approach here is simply to ignore them if given in conjunction with ``PCi_ja``. `~astropy.wcs.Wcsprm.has_pc`, `~astropy.wcs.Wcsprm.has_cd` and `~astropy.wcs.Wcsprm.has_crota` can be used to determine which of these alternatives are present in the header. These alternate specifications of the linear transformation matrix are translated immediately to ``PCi_ja`` by `~astropy.wcs.Wcsprm.set` and are nowhere visible to the lower-level routines. In particular, `~astropy.wcs.Wcsprm.set` resets `~astropy.wcs.Wcsprm.cdelt` to unity if ``CDi_ja`` is present (and no ``PCi_ja``). If no ``CROTAia`` is associated with the latitude axis, `~astropy.wcs.Wcsprm.set` reverts to a unity ``PCi_ja`` matrix. """ crpix = """ ``double array[naxis]`` Coordinate reference pixels (``CRPIXja``) for each pixel axis. """ crval = """ ``double array[naxis]`` Coordinate reference values (``CRVALia``) for each coordinate axis. """ crval_tabprm = """ ``double array[M]`` Index values for the reference pixel for each of the tabular coord axes.
""" csyer = """ ``double array[naxis]`` The systematic error in the coordinate value axes, ``CSYERia``. An undefined value is represented by NaN. """ ctype = """ ``list of strings[naxis]`` List of ``CTYPEia`` keyvalues. The `~astropy.wcs.Wcsprm.ctype` keyword values must be in upper case and there must be zero or one pair of matched celestial axis types, and zero or one spectral axis. """ cubeface = """ ``int`` Index into the ``pixcrd`` (pixel coordinate) array for the ``CUBEFACE`` axis. This is used for quadcube projections where the cube faces are stored on a separate axis. The quadcube projections (``TSC``, ``CSC``, ``QSC``) may be represented in FITS in either of two ways: - The six faces may be laid out in one plane and numbered as follows:: 0 4 3 2 1 4 3 2 5 Faces 2, 3 and 4 may appear on one side or the other (or both). The world-to-pixel routines map faces 2, 3 and 4 to the left but the pixel-to-world routines accept them on either side. - The ``COBE`` convention in which the six faces are stored in a three-dimensional structure using a ``CUBEFACE`` axis indexed from 0 to 5 as above. These routines support both methods; `~astropy.wcs.Wcsprm.set` determines which is being used by the presence or absence of a ``CUBEFACE`` axis in `~astropy.wcs.Wcsprm.ctype`. `~astropy.wcs.Wcsprm.p2s` and `~astropy.wcs.Wcsprm.s2p` translate the ``CUBEFACE`` axis representation to the single plane representation understood by the lower-level projection routines. """ cunit = """ ``list of astropy.UnitBase[naxis]`` List of ``CUNITia`` keyvalues as `astropy.units.UnitBase` instances. These define the units of measurement of the ``CRVALia``, ``CDELTia`` and ``CDi_ja`` keywords. As ``CUNITia`` is an optional header keyword, `~astropy.wcs.Wcsprm.cunit` may be left blank but otherwise is expected to contain a standard units specification as defined by WCS Paper I. `~astropy.wcs.Wcsprm.unitfix` is available to translate commonly used non-standard units specifications but this must be done as a separate step before invoking `~astropy.wcs.Wcsprm.set`. For celestial axes, if `~astropy.wcs.Wcsprm.cunit` is not blank, `~astropy.wcs.Wcsprm.set` uses ``wcsunits`` to parse it and scale `~astropy.wcs.Wcsprm.cdelt`, `~astropy.wcs.Wcsprm.crval`, and `~astropy.wcs.Wcsprm.cd` to decimal degrees. It then resets `~astropy.wcs.Wcsprm.cunit` to ``"deg"``. For spectral axes, if `~astropy.wcs.Wcsprm.cunit` is not blank, `~astropy.wcs.Wcsprm.set` uses ``wcsunits`` to parse it and scale `~astropy.wcs.Wcsprm.cdelt`, `~astropy.wcs.Wcsprm.crval`, and `~astropy.wcs.Wcsprm.cd` to SI units. It then resets `~astropy.wcs.Wcsprm.cunit` accordingly. `~astropy.wcs.Wcsprm.set` ignores `~astropy.wcs.Wcsprm.cunit` for other coordinate types; `~astropy.wcs.Wcsprm.cunit` may be used to label coordinate values. """ cylfix = """ cylfix() Fixes WCS keyvalues for malformed cylindrical projections. Returns ------- success : int Returns ``0`` for success; ``-1`` if no change required. """ data = """ ``float array`` The array data for the `~astropy.wcs.DistortionLookupTable`. """ data_wtbarr = """ ``double array`` The array data for the BINTABLE. """ dateavg = """ ``string`` Representative mid-point of the date of observation. In ISO format, ``yyyy-mm-ddThh:mm:ss``. See also -------- astropy.wcs.Wcsprm.dateobs """ dateobs = """ ``string`` Start of the date of observation. In ISO format, ``yyyy-mm-ddThh:mm:ss``. 
See also -------- astropy.wcs.Wcsprm.dateavg """ datfix = """ datfix() Translates the old ``DATE-OBS`` date format to year-2000 standard form ``(yyyy-mm-ddThh:mm:ss)`` and derives ``MJD-OBS`` from it if not already set. Alternatively, if `~astropy.wcs.Wcsprm.mjdobs` is set and `~astropy.wcs.Wcsprm.dateobs` isn't, then `~astropy.wcs.Wcsprm.datfix` derives `~astropy.wcs.Wcsprm.dateobs` from it. If both are set but disagree by more than half a day then `ValueError` is raised. Returns ------- success : int Returns ``0`` for success; ``-1`` if no change required. """ delta = """ ``double array[M]`` (read-only) Interpolated indices into the coord array. Array of interpolated indices into the coordinate array such that Upsilon_m, as defined in Paper III, is equal to (`~astropy.wcs.Tabprm.p0` [m] + 1) + delta[m]. """ det2im = """ Convert detector coordinates to image plane coordinates. """ det2im1 = """ A `~astropy.wcs.DistortionLookupTable` object for detector to image plane correction in the *x*-axis. """ det2im2 = """ A `~astropy.wcs.DistortionLookupTable` object for detector to image plane correction in the *y*-axis. """ dims = """ ``int array[ndim]`` (read-only) The dimensions of the tabular array `~astropy.wcs.Wtbarr.data`. """ DistortionLookupTable = """ DistortionLookupTable(*table*, *crpix*, *crval*, *cdelt*) Represents a single lookup table for a `distortion paper`_ transformation. Parameters ---------- table : 2-dimensional array The distortion lookup table. crpix : 2-tuple The distortion array reference pixel. crval : 2-tuple The image array pixel coordinate. cdelt : 2-tuple The grid step size. """ dsun_obs = """ ``double`` Distance between the centre of the Sun and the observer (m). If undefined, this is set to `None`. """ equinox = """ ``double`` The equinox associated with dynamical equatorial or ecliptic coordinate systems. ``EQUINOXa`` (or ``EPOCH`` in older headers). Not applicable to ICRS equatorial or ecliptic coordinates. An undefined value is represented by NaN. """ extlev = """ ``int`` (read-only) ``EXTLEV`` identifying the binary table extension. """ extnam = """ ``str`` (read-only) ``EXTNAME`` identifying the binary table extension. """ extrema = """ ``double array[K_M]...[K_2][2][M]`` (read-only) An array recording the minimum and maximum value of each element of the coordinate vector in each row of the coordinate array, with the dimensions:: (K_M, ... K_2, 2, M) (see `~astropy.wcs.Tabprm.K`). The minimum is recorded in the first element of the compressed K_1 dimension, then the maximum. This array is used by the inverse table lookup function to speed up table searches. """ extver = """ ``int`` (read-only) ``EXTVER`` identifying the binary table extension. """ find_all_wcs = """ find_all_wcs(relax=0, keysel=0) Find all WCS transformations in the header. Parameters ---------- header : str The raw FITS header data. relax : bool or int Degree of permissiveness: - `False`: Recognize only FITS keywords defined by the published WCS standard. - `True`: Admit all recognized informal extensions of the WCS standard. - `int`: a bit field selecting specific extensions to accept. See :ref:`astropy:relaxread` for details. keysel : sequence of flags Used to restrict the keyword types considered: - ``WCSHDR_IMGHEAD``: Image header keywords. - ``WCSHDR_BIMGARR``: Binary table image array. - ``WCSHDR_PIXLIST``: Pixel list keywords. If zero, there is no restriction. If -1, `wcspih` is called, rather than `wcstbh`.
Returns ------- wcs_list : list of `~astropy.wcs.Wcsprm` """ fix = """ fix(translate_units='', naxis=0) Applies all of the corrections handled separately by `~astropy.wcs.Wcsprm.datfix`, `~astropy.wcs.Wcsprm.unitfix`, `~astropy.wcs.Wcsprm.celfix`, `~astropy.wcs.Wcsprm.spcfix`, `~astropy.wcs.Wcsprm.cylfix` and `~astropy.wcs.Wcsprm.cdfix`. Parameters ---------- translate_units : str, optional Specify which potentially unsafe translations of non-standard unit strings to perform. By default, performs all. Although ``"S"`` is commonly used to represent seconds, its translation to ``"s"`` is potentially unsafe since the standard recognizes ``"S"`` formally as Siemens, however rarely that may be used. The same applies to ``"H"`` for hours (Henry), and ``"D"`` for days (Debye). This string controls what to do in such cases, and is case-insensitive. - If the string contains ``"s"``, translate ``"S"`` to ``"s"``. - If the string contains ``"h"``, translate ``"H"`` to ``"h"``. - If the string contains ``"d"``, translate ``"D"`` to ``"d"``. Thus ``''`` doesn't do any unsafe translations, whereas ``'shd'`` does all of them. naxis : int array, optional Image axis lengths. If this array is set to zero or ``None``, then `~astropy.wcs.Wcsprm.cylfix` will not be invoked. Returns ------- status : dict Returns a dictionary containing the following keys, each referring to a status string for each of the sub-fix functions that were called: - `~astropy.wcs.Wcsprm.cdfix` - `~astropy.wcs.Wcsprm.datfix` - `~astropy.wcs.Wcsprm.unitfix` - `~astropy.wcs.Wcsprm.celfix` - `~astropy.wcs.Wcsprm.spcfix` - `~astropy.wcs.Wcsprm.cylfix` """ get_offset = """ get_offset(x, y) -> (x, y) Returns the offset as defined in the distortion lookup table. Returns ------- coordinate : (2,) tuple The offset from the distortion table for pixel point (*x*, *y*). """ get_cdelt = """ get_cdelt() -> numpy.ndarray Coordinate increments (``CDELTia``) for each coord axis as ``double array[naxis]``. Returns the ``CDELT`` offsets in read-only form. Unlike the `~astropy.wcs.Wcsprm.cdelt` property, this works even when the header specifies the linear transformation matrix in one of the alternative ``CDi_ja`` or ``CROTAia`` forms. This is useful when you want access to the linear transformation matrix, but don't care how it was specified in the header. """ get_pc = """ get_pc() -> numpy.ndarray Returns the ``PC`` matrix in read-only form as ``double array[naxis][naxis]``. Unlike the `~astropy.wcs.Wcsprm.pc` property, this works even when the header specifies the linear transformation matrix in one of the alternative ``CDi_ja`` or ``CROTAia`` forms. This is useful when you want access to the linear transformation matrix, but don't care how it was specified in the header. """ get_ps = """ get_ps() -> list Returns ``PSi_ma`` keywords for each *i* and *m* as list of tuples. Returns ------- ps : list Returned as a list of tuples of the form (*i*, *m*, *value*): - *i*: int. Axis number, as in ``PSi_ma``, (i.e. 1-relative) - *m*: int. Parameter number, as in ``PSi_ma``, (i.e. 0-relative) - *value*: string. Parameter value. See also -------- astropy.wcs.Wcsprm.set_ps : Set ``PSi_ma`` values """ get_pv = """ get_pv() -> list Returns ``PVi_ma`` keywords for each *i* and *m* as list of tuples. Returns ------- sequence of tuple Returned as a list of tuples of the form (*i*, *m*, *value*): - *i*: int. Axis number, as in ``PVi_ma``, (i.e. 1-relative) - *m*: int. Parameter number, as in ``PVi_ma``, (i.e. 0-relative) - *value*: string. Parameter value. 
See also -------- astropy.wcs.Wcsprm.set_pv : Set ``PVi_ma`` values Notes ----- Note that, if they were not given, `~astropy.wcs.Wcsprm.set` resets the entries for ``PVi_1a``, ``PVi_2a``, ``PVi_3a``, and ``PVi_4a`` for longitude axis *i* to match (``phi_0``, ``theta_0``), the native longitude and latitude of the reference point given by ``LONPOLEa`` and ``LATPOLEa``. """ has_cd = """ has_cd() -> bool Returns `True` if ``CDi_ja`` is present. ``CDi_ja`` is an alternate specification of the linear transformation matrix, maintained for historical compatibility. Matrix elements in the IRAF convention are equivalent to the product ``CDi_ja = CDELTia * PCi_ja``, but the defaults differ from that of the ``PCi_ja`` matrix. If one or more ``CDi_ja`` keywords are present then all unspecified ``CDi_ja`` default to zero. If no ``CDi_ja`` (or ``CROTAia``) keywords are present, then the header is assumed to be in ``PCi_ja`` form whether or not any ``PCi_ja`` keywords are present since this results in an interpretation of ``CDELTia`` consistent with the original FITS specification. While ``CDi_ja`` may not formally co-exist with ``PCi_ja``, it may co-exist with ``CDELTia`` and ``CROTAia`` which are to be ignored. See also -------- astropy.wcs.Wcsprm.cd : Get the raw ``CDi_ja`` values. """ has_cdi_ja = """ has_cdi_ja() -> bool Alias for `~astropy.wcs.Wcsprm.has_cd`. Maintained for backward compatibility. """ has_crota = """ has_crota() -> bool Returns `True` if ``CROTAia`` is present. ``CROTAia`` is an alternate specification of the linear transformation matrix, maintained for historical compatibility. In the AIPS convention, ``CROTAia`` may only be associated with the latitude axis of a celestial axis pair. It specifies a rotation in the image plane that is applied *after* the ``CDELTia``; any other ``CROTAia`` keywords are ignored. ``CROTAia`` may not formally co-exist with ``PCi_ja``. ``CROTAia`` and ``CDELTia`` may formally co-exist with ``CDi_ja`` but if so are to be ignored. See also -------- astropy.wcs.Wcsprm.crota : Get the raw ``CROTAia`` values """ has_crotaia = """ has_crotaia() -> bool Alias for `~astropy.wcs.Wcsprm.has_crota`. Maintained for backward compatibility. """ has_pc = """ has_pc() -> bool Returns `True` if ``PCi_ja`` is present. ``PCi_ja`` is the recommended way to specify the linear transformation matrix. See also -------- astropy.wcs.Wcsprm.pc : Get the raw ``PCi_ja`` values """ has_pci_ja = """ has_pci_ja() -> bool Alias for `~astropy.wcs.Wcsprm.has_pc`. Maintained for backward compatibility. """ hgln_obs = """ ``double`` Stonyhurst heliographic longitude of the observer. If undefined, this is set to `None`. """ hglt_obs = """ ``double`` Heliographic latitude (Carrington or Stonyhurst) of the observer (deg). If undefined, this is set to `None`. """ i = """ ``int`` (read-only) Image axis number. """ imgpix_matrix = """ ``double array[2][2]`` (read-only) Inverse of the ``CDELT`` or ``PC`` matrix. Inverse containing the product of the ``CDELTia`` diagonal matrix and the ``PCi_ja`` matrix. """ is_unity = """ is_unity() -> bool Returns `True` if the linear transformation matrix (`~astropy.wcs.Wcsprm.cd`) is unity. """ K = """ ``int array[M]`` (read-only) The lengths of the axes of the coordinate array. An array of length `M` whose elements record the lengths of the axes of the coordinate array and of each indexing vector. """ kind = """ ``str`` (read-only) ``wcstab`` array type. Character identifying the ``wcstab`` array type: - ``'c'``: coordinate array, - ``'i'``: index vector. 
""" lat = """ ``int`` (read-only) The index into the world coord array containing latitude values. """ latpole = """ ``double`` The native latitude of the celestial pole, ``LATPOLEa`` (deg). """ lattyp = """ ``string`` (read-only) Celestial axis type for latitude. For example, "RA", "DEC", "GLON", "GLAT", etc. extracted from "RA--", "DEC-", "GLON", "GLAT", etc. in the first four characters of ``CTYPEia`` but with trailing dashes removed. """ lng = """ ``int`` (read-only) The index into the world coord array containing longitude values. """ lngtyp = """ ``string`` (read-only) Celestial axis type for longitude. For example, "RA", "DEC", "GLON", "GLAT", etc. extracted from "RA--", "DEC-", "GLON", "GLAT", etc. in the first four characters of ``CTYPEia`` but with trailing dashes removed. """ lonpole = """ ``double`` The native longitude of the celestial pole. ``LONPOLEa`` (deg). """ M = """ ``int`` (read-only) Number of tabular coordinate axes. """ m = """ ``int`` (read-only) ``wcstab`` axis number for index vectors. """ map = """ ``int array[M]`` Association between axes. A vector of length `~astropy.wcs.Tabprm.M` that defines the association between axis *m* in the *M*-dimensional coordinate array (1 <= *m* <= *M*) and the indices of the intermediate world coordinate and world coordinate arrays. When the intermediate and world coordinate arrays contain the full complement of coordinate elements in image-order, as will usually be the case, then ``map[m-1] == i-1`` for axis *i* in the *N*-dimensional image (1 <= *i* <= *N*). In terms of the FITS keywords:: map[PVi_3a - 1] == i - 1. However, a different association may result if the intermediate coordinates, for example, only contains a (relevant) subset of intermediate world coordinate elements. For example, if *M* == 1 for an image with *N* > 1, it is possible to fill the intermediate coordinates with the relevant coordinate element with ``nelem`` set to 1. In this case ``map[0] = 0`` regardless of the value of *i*. """ mix = """ mix(mixpix, mixcel, vspan, vstep, viter, world, pixcrd, origin) Given either the celestial longitude or latitude plus an element of the pixel coordinate, solves for the remaining elements by iterating on the unknown celestial coordinate element using `~astropy.wcs.Wcsprm.s2p`. Parameters ---------- mixpix : int Which element on the pixel coordinate is given. mixcel : int Which element of the celestial coordinate is given. If *mixcel* = ``1``, celestial longitude is given in ``world[self.lng]``, latitude returned in ``world[self.lat]``. If *mixcel* = ``2``, celestial latitude is given in ``world[self.lat]``, longitude returned in ``world[self.lng]``. vspan : (float, float) Solution interval for the celestial coordinate, in degrees. The ordering of the two limits is irrelevant. Longitude ranges may be specified with any convenient normalization, for example ``(-120,+120)`` is the same as ``(240,480)``, except that the solution will be returned with the same normalization, i.e. lie within the interval specified. vstep : float Step size for solution search, in degrees. If ``0``, a sensible, although perhaps non-optimal default will be used. viter : int If a solution is not found then the step size will be halved and the search recommenced. *viter* controls how many times the step size is halved. The allowed range is 5 - 10. world : ndarray World coordinate elements as ``double array[naxis]``. ``world[self.lng]`` and ``world[self.lat]`` are the celestial longitude and latitude, in degrees. 
Which is given and which returned depends on the value of *mixcel*. All other elements are given. The results will be written to this array in-place. pixcrd : ndarray Pixel coordinates as ``double array[naxis]``. The element indicated by *mixpix* is given and the remaining elements will be written in-place. {} Returns ------- result : dict Returns a dictionary with the following keys: - *phi* (``double array[naxis]``) - *theta* (``double array[naxis]``) - Longitude and latitude in the native coordinate system of the projection, in degrees. - *imgcrd* (``double array[naxis]``) - Image coordinate elements. ``imgcrd[self.lng]`` and ``imgcrd[self.lat]`` are the projected *x*- and *y*-coordinates, in decimal degrees. - *world* (``double array[naxis]``) - Another reference to the *world* argument passed in. Raises ------ MemoryError Memory allocation failed. SingularMatrixError Linear transformation matrix is singular. InconsistentAxisTypesError Inconsistent or unrecognized coordinate axis types. ValueError Invalid parameter value. InvalidTransformError Invalid coordinate transformation parameters. InvalidTransformError Ill-conditioned coordinate transformation parameters. InvalidCoordinateError Invalid world coordinate. NoSolutionError No solution found in the specified interval. See also -------- astropy.wcs.Wcsprm.lat, astropy.wcs.Wcsprm.lng Get the axes numbers for latitude and longitude Notes ----- Initially, the specified solution interval is checked to see if it's a \"crossing\" interval. If it isn't, a search is made for a crossing solution by iterating on the unknown celestial coordinate starting at the upper limit of the solution interval and decrementing by the specified step size. A crossing is indicated if the trial value of the pixel coordinate steps through the value specified. If a crossing interval is found then the solution is determined by a modified form of \"regula falsi\" division of the crossing interval. If no crossing interval was found within the specified solution interval then a search is made for a \"non-crossing\" solution as may arise from a point of tangency. The process is complicated by having to make allowance for the discontinuities that occur in all map projections. Once one solution has been determined others may be found by subsequent invocations of `~astropy.wcs.Wcsprm.mix` with suitably restricted solution intervals. Note the circumstance that arises when the solution point lies at a native pole of a projection in which the pole is represented as a finite curve, for example the zenithals and conics. In such cases two or more valid solutions may exist but `~astropy.wcs.Wcsprm.mix` only ever returns one. Because of its generality, `~astropy.wcs.Wcsprm.mix` is very compute-intensive. For compute-limited applications, more efficient special-case solvers could be written for simple projections, for example non-oblique cylindrical projections. """.format(ORIGIN()) mjdavg = """ ``double`` Modified Julian Date corresponding to ``DATE-AVG``. ``(MJD = JD - 2400000.5)``. An undefined value is represented by NaN. See also -------- astropy.wcs.Wcsprm.mjdobs """ mjdobs = """ ``double`` Modified Julian Date corresponding to ``DATE-OBS``. ``(MJD = JD - 2400000.5)``. An undefined value is represented by NaN. See also -------- astropy.wcs.Wcsprm.mjdavg """ name = """ ``string`` The name given to the coordinate representation ``WCSNAMEa``. """ naxis = """ ``int`` (read-only) The number of axes (pixel and coordinate). Given by the ``NAXIS`` or ``WCSAXESa`` keyvalues. 
The number of coordinate axes is determined at parsing time, and can not be subsequently changed. It is determined from the highest of the following: 1. ``NAXIS`` 2. ``WCSAXESa`` 3. The highest axis number in any parameterized WCS keyword. The keyvalue, as well as the keyword, must be syntactically valid otherwise it will not be considered. If none of these keyword types is present, i.e. if the header only contains auxiliary WCS keywords for a particular coordinate representation, then no coordinate description is constructed for it. This value may differ for different coordinate representations of the same image. """ nc = """ ``int`` (read-only) Total number of coord vectors in the coord array. Total number of coordinate vectors in the coordinate array being the product K_1 * K_2 * ... * K_M. """ ndim = """ ``int`` (read-only) Expected dimensionality of the ``wcstab`` array. """ obsgeo = """ ``double array[3]`` Location of the observer in a standard terrestrial reference frame. ``OBSGEO-X``, ``OBSGEO-Y``, ``OBSGEO-Z`` (in meters). An undefined value is represented by NaN. """ p0 = """ ``int array[M]`` Interpolated indices into the coordinate array. Vector of length `~astropy.wcs.Tabprm.M` of interpolated indices into the coordinate array such that Upsilon_m, as defined in Paper III, is equal to ``(p0[m] + 1) + delta[m]``. """ p2s = """ p2s(pixcrd, origin) Converts pixel to world coordinates. Parameters ---------- pixcrd : ndarray Array of pixel coordinates as ``double array[ncoord][nelem]``. {} Returns ------- result : dict Returns a dictionary with the following keys: - *imgcrd*: ndarray - Array of intermediate world coordinates as ``double array[ncoord][nelem]``. For celestial axes, ``imgcrd[][self.lng]`` and ``imgcrd[][self.lat]`` are the projected *x*-, and *y*-coordinates, in pseudo degrees. For spectral axes, ``imgcrd[][self.spec]`` is the intermediate spectral coordinate, in SI units. - *phi*: ndarray - Array as ``double array[ncoord]``. - *theta*: ndarray - Longitude and latitude in the native coordinate system of the projection, in degrees, as ``double array[ncoord]``. - *world*: ndarray - Array of world coordinates as ``double array[ncoord][nelem]``. For celestial axes, ``world[][self.lng]`` and ``world[][self.lat]`` are the celestial longitude and latitude, in degrees. For spectral axes, ``world[][self.spec]`` is the intermediate spectral coordinate, in SI units. - *stat*: ndarray - Status return value for each coordinate as ``int array[ncoord]``. ``0`` for success, ``1+`` for invalid pixel coordinate. Raises ------ MemoryError Memory allocation failed. SingularMatrixError Linear transformation matrix is singular. InconsistentAxisTypesError Inconsistent or unrecognized coordinate axis types. ValueError Invalid parameter value. ValueError *x*- and *y*-coordinate arrays are not the same size. InvalidTransformError Invalid coordinate transformation parameters. InvalidTransformError Ill-conditioned coordinate transformation parameters. See also -------- astropy.wcs.Wcsprm.lat, astropy.wcs.Wcsprm.lng Definition of the latitude and longitude axes """.format(ORIGIN()) p4_pix2foc = """ p4_pix2foc(*pixcrd, origin*) -> ``double array[ncoord][nelem]`` Convert pixel coordinates to focal plane coordinates using `distortion paper`_ lookup-table correction. Parameters ---------- pixcrd : ndarray Array of pixel coordinates as ``double array[ncoord][nelem]``. {} Returns ------- foccrd : ndarray Returns an array of focal plane coordinates as ``double array[ncoord][nelem]``. 
Raises ------ MemoryError Memory allocation failed. ValueError Invalid coordinate transformation parameters. """.format(ORIGIN()) pc = """ ``double array[naxis][naxis]`` The ``PCi_ja`` (pixel coordinate) transformation matrix. The order is:: [[PC1_1, PC1_2], [PC2_1, PC2_2]] For historical compatibility, three alternate specifications of the linear transformations are available in wcslib. The canonical ``PCi_ja`` with ``CDELTia``, ``CDi_ja``, and the deprecated ``CROTAia`` keywords. Although the latter may not formally co-exist with ``PCi_ja``, the approach here is simply to ignore them if given in conjunction with ``PCi_ja``. `~astropy.wcs.Wcsprm.has_pc`, `~astropy.wcs.Wcsprm.has_cd` and `~astropy.wcs.Wcsprm.has_crota` can be used to determine which of these alternatives are present in the header. These alternate specifications of the linear transformation matrix are translated immediately to ``PCi_ja`` by `~astropy.wcs.Wcsprm.set` and are nowhere visible to the lower-level routines. In particular, `~astropy.wcs.Wcsprm.set` resets `~astropy.wcs.Wcsprm.cdelt` to unity if ``CDi_ja`` is present (and no ``PCi_ja``). If no ``CROTAia`` is associated with the latitude axis, `~astropy.wcs.Wcsprm.set` reverts to a unity ``PCi_ja`` matrix. """ phi0 = """ ``double`` The native longitude of the fiducial point. The point whose celestial coordinates are given in ``ref[1:2]``. If undefined (NaN) the initialization routine, `~astropy.wcs.Wcsprm.set`, will set this to a projection-specific default. See also -------- astropy.wcs.Wcsprm.theta0 """ pix2foc = """ pix2foc(*pixcrd, origin*) -> ``double array[ncoord][nelem]`` Perform both `SIP`_ polynomial and `distortion paper`_ lookup-table correction in parallel. Parameters ---------- pixcrd : ndarray Array of pixel coordinates as ``double array[ncoord][nelem]``. {} Returns ------- foccrd : ndarray Returns an array of focal plane coordinates as ``double array[ncoord][nelem]``. Raises ------ MemoryError Memory allocation failed. ValueError Invalid coordinate transformation parameters. """.format(ORIGIN()) piximg_matrix = """ ``double array[2][2]`` (read-only) Matrix containing the product of the ``CDELTia`` diagonal matrix and the ``PCi_ja`` matrix. """ print_contents = """ print_contents() Print the contents of the `~astropy.wcs.Wcsprm` object to stdout. Probably only useful for debugging purposes, and may be removed in the future. To get a string of the contents, use `repr`. """ print_contents_tabprm = """ print_contents() Print the contents of the `~astropy.wcs.Tabprm` object to stdout. Probably only useful for debugging purposes, and may be removed in the future. To get a string of the contents, use `repr`. """ print_contents_wtbarr = """ print_contents() Print the contents of the `~astropy.wcs.Wtbarr` object to stdout. Probably only useful for debugging purposes, and may be removed in the future. To get a string of the contents, use `repr`. """ radesys = """ ``string`` The equatorial or ecliptic coordinate system type, ``RADESYSa``. """ restfrq = """ ``double`` Rest frequency (Hz) from ``RESTFRQa``. An undefined value is represented by NaN. """ restwav = """ ``double`` Rest wavelength (m) from ``RESTWAVa``. An undefined value is represented by NaN. """ row = """ ``int`` (read-only) Table row number. """ rsun_ref = """ ``double`` Reference radius of the Sun used in coordinate calculations (m). If undefined, this is set to `None`. """ s2p = """ s2p(world, origin) Transforms world coordinates to pixel coordinates.
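A minimal usage sketch (the names here are illustrative assumptions: ``w`` is a 2-D celestial `~astropy.wcs.Wcsprm` on which `~astropy.wcs.Wcsprm.set` has already been called, and the coordinate values are arbitrary)::

    result = w.s2p([[266.4, -29.0]], 0)  # one (lng, lat) pair, 0-based origin
    pixcrd = result['pixcrd']            # corresponding pixel coordinates
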
Parameters ---------- world : ndarray Array of world coordinates, in decimal degrees, as ``double array[ncoord][nelem]``. {} Returns ------- result : dict Returns a dictionary with the following keys: - *phi*: ``double array[ncoord]`` - *theta*: ``double array[ncoord]`` - Longitude and latitude in the native coordinate system of the projection, in degrees. - *imgcrd*: ``double array[ncoord][nelem]`` - Array of intermediate world coordinates. For celestial axes, ``imgcrd[][self.lng]`` and ``imgcrd[][self.lat]`` are the projected *x*-, and *y*-coordinates, in pseudo \"degrees\". For quadcube projections with a ``CUBEFACE`` axis, the face number is also returned in ``imgcrd[][self.cubeface]``. For spectral axes, ``imgcrd[][self.spec]`` is the intermediate spectral coordinate, in SI units. - *pixcrd*: ``double array[ncoord][nelem]`` - Array of pixel coordinates. Pixel coordinates are zero-based. - *stat*: ``int array[ncoord]`` - Status return value for each coordinate. ``0`` for success, ``1+`` for invalid pixel coordinate. Raises ------ MemoryError Memory allocation failed. SingularMatrixError Linear transformation matrix is singular. InconsistentAxisTypesError Inconsistent or unrecognized coordinate axis types. ValueError Invalid parameter value. InvalidTransformError Invalid coordinate transformation parameters. InvalidTransformError Ill-conditioned coordinate transformation parameters. See also -------- astropy.wcs.Wcsprm.lat, astropy.wcs.Wcsprm.lng Definition of the latitude and longitude axes """.format(ORIGIN()) sense = """ ``int array[M]`` +1 if monotonically increasing, -1 if decreasing. A vector of length `~astropy.wcs.Tabprm.M` whose elements indicate whether the corresponding indexing vector is monotonically increasing (+1), or decreasing (-1). """ set = """ set() Sets up a WCS object for use according to information supplied within it. Note that this routine need not be called directly; it will be invoked by `~astropy.wcs.Wcsprm.p2s` and `~astropy.wcs.Wcsprm.s2p` if necessary. Some attributes that are based on other attributes (such as `~astropy.wcs.Wcsprm.lattyp` on `~astropy.wcs.Wcsprm.ctype`) may not be correct until after `~astropy.wcs.Wcsprm.set` is called. `~astropy.wcs.Wcsprm.set` strips off trailing blanks in all string members. `~astropy.wcs.Wcsprm.set` recognizes the ``NCP`` projection and converts it to the equivalent ``SIN`` projection and it also recognizes ``GLS`` as a synonym for ``SFL``. It does alias translation for the AIPS spectral types (``FREQ-LSR``, ``FELO-HEL``, etc.) but without changing the input header keywords. Raises ------ MemoryError Memory allocation failed. SingularMatrixError Linear transformation matrix is singular. InconsistentAxisTypesError Inconsistent or unrecognized coordinate axis types. ValueError Invalid parameter value. InvalidTransformError Invalid coordinate transformation parameters. InvalidTransformError Ill-conditioned coordinate transformation parameters. """ set_tabprm = """ set() Allocates memory for work arrays. Also sets up the class according to information supplied within it. Note that this routine need not be called directly; it will be invoked by functions that need it. Raises ------ MemoryError Memory allocation failed. InvalidTabularParametersError Invalid tabular parameters. """ set_celprm = """ set() Sets up a ``celprm`` struct according to information supplied within it. Note that this routine need not be called directly; it will be invoked by functions that need it. Raises ------ MemoryError Memory allocation failed. 
InvalidPrjParametersError Invalid celestial parameters. """ set_ps = """ set_ps(ps) Sets ``PSi_ma`` keywords for each *i* and *m*. Parameters ---------- ps : sequence of tuple The input must be a sequence of tuples of the form (*i*, *m*, *value*): - *i*: int. Axis number, as in ``PSi_ma``, (i.e. 1-relative) - *m*: int. Parameter number, as in ``PSi_ma``, (i.e. 0-relative) - *value*: string. Parameter value. See also -------- astropy.wcs.Wcsprm.get_ps """ set_pv = """ set_pv(pv) Sets ``PVi_ma`` keywords for each *i* and *m*. Parameters ---------- pv : list of tuple The input must be a sequence of tuples of the form (*i*, *m*, *value*): - *i*: int. Axis number, as in ``PVi_ma``, (i.e. 1-relative) - *m*: int. Parameter number, as in ``PVi_ma``, (i.e. 0-relative) - *value*: float. Parameter value. See also -------- astropy.wcs.Wcsprm.get_pv """ sip = """ Get/set the `~astropy.wcs.Sip` object for performing `SIP`_ distortion correction. """ Sip = """ Sip(*a, b, ap, bp, crpix*) The `~astropy.wcs.Sip` class performs polynomial distortion correction using the `SIP`_ convention in both directions. Parameters ---------- a : ndarray The ``A_i_j`` polynomial for pixel to focal plane transformation as ``double array[m+1][m+1]``. Its size must be (*m* + 1, *m* + 1) where *m* = ``A_ORDER``. b : ndarray The ``B_i_j`` polynomial for pixel to focal plane transformation as ``double array[m+1][m+1]``. Its size must be (*m* + 1, *m* + 1) where *m* = ``B_ORDER``. ap : ndarray The ``AP_i_j`` polynomial for focal plane to pixel transformation as ``double array[m+1][m+1]``. Its size must be (*m* + 1, *m* + 1) where *m* = ``AP_ORDER``. bp : ndarray The ``BP_i_j`` polynomial for focal plane to pixel transformation as ``double array[m+1][m+1]``. Its size must be (*m* + 1, *m* + 1) where *m* = ``BP_ORDER``. crpix : ndarray The reference pixel as ``double array[2]``. Notes ----- Shupe, D. L., M. Moshir, J. Li, D. Makovoz and R. Narron. 2005. "The SIP Convention for Representing Distortion in FITS Image Headers." ADASS XIV. """ sip_foc2pix = """ sip_foc2pix(*foccrd, origin*) -> ``double array[ncoord][nelem]`` Convert focal plane coordinates to pixel coordinates using the `SIP`_ polynomial distortion convention. Parameters ---------- foccrd : ndarray Array of focal plane coordinates as ``double array[ncoord][nelem]``. {} Returns ------- pixcrd : ndarray Returns an array of pixel coordinates as ``double array[ncoord][nelem]``. Raises ------ MemoryError Memory allocation failed. ValueError Invalid coordinate transformation parameters. """.format(ORIGIN()) sip_pix2foc = """ sip_pix2foc(*pixcrd, origin*) -> ``double array[ncoord][nelem]`` Convert pixel coordinates to focal plane coordinates using the `SIP`_ polynomial distortion convention. Parameters ---------- pixcrd : ndarray Array of pixel coordinates as ``double array[ncoord][nelem]``. {} Returns ------- foccrd : ndarray Returns an array of focal plane coordinates as ``double array[ncoord][nelem]``. Raises ------ MemoryError Memory allocation failed. ValueError Invalid coordinate transformation parameters. """.format(ORIGIN()) spcfix = """ spcfix() -> int Translates AIPS-convention spectral coordinate types. {``FREQ``, ``VELO``, ``FELO``}-{``OBS``, ``HEL``, ``LSR``} (e.g. ``FREQ-LSR``, ``VELO-OBS``, ``FELO-HEL``) Returns ------- success : int Returns ``0`` for success; ``-1`` if no change required. """ spec = """ ``int`` (read-only) The index containing the spectral axis values. """ specsys = """ ``string`` Spectral reference frame (standard of rest), ``SPECSYSa``.
See also -------- astropy.wcs.Wcsprm.ssysobs, astropy.wcs.Wcsprm.velosys """ sptr = """ sptr(ctype, i=-1) Translates the spectral axis in a WCS object. For example, a ``FREQ`` axis may be translated into ``ZOPT-F2W`` and vice versa. Parameters ---------- ctype : str Required spectral ``CTYPEia``, maximum of 8 characters. The first four characters are required to be given and are never modified. The remaining four, the algorithm code, are completely determined by, and must be consistent with, the first four characters. Wildcarding may be used, i.e. if the final three characters are specified as ``\"???\"``, or if just the eighth character is specified as ``\"?\"``, the correct algorithm code will be substituted and returned. i : int Index of the spectral axis (0-relative). If ``i < 0`` (or not provided), it will be set to the first spectral axis identified from the ``CTYPE`` keyvalues in the FITS header. Raises ------ MemoryError Memory allocation failed. SingularMatrixError Linear transformation matrix is singular. InconsistentAxisTypesError Inconsistent or unrecognized coordinate axis types. ValueError Invalid parameter value. InvalidTransformError Invalid coordinate transformation parameters. InvalidTransformError Ill-conditioned coordinate transformation parameters. InvalidSubimageSpecificationError Invalid subimage specification (no spectral axis). """ ssysobs = """ ``string`` Spectral reference frame. The spectral reference frame in which there is no differential variation in the spectral coordinate across the field-of-view, ``SSYSOBSa``. See also -------- astropy.wcs.Wcsprm.specsys, astropy.wcs.Wcsprm.velosys """ ssyssrc = """ ``string`` Spectral reference frame for redshift. The spectral reference frame (standard of rest) in which the redshift was measured, ``SSYSSRCa``. """ sub = """ sub(axes) Extracts the coordinate description for a subimage from a `~astropy.wcs.WCS` object. The world coordinate system of the subimage must be separable in the sense that the world coordinates at any point in the subimage must depend only on the pixel coordinates of the axes extracted. In practice, this means that the ``PCi_ja`` matrix of the original image must not contain non-zero off-diagonal terms that associate any of the subimage axes with any of the non-subimage axes. `sub` can also add axes to a wcsprm object. The new axes will be created using the defaults set by the Wcsprm constructor which produce a simple, unnamed, linear axis with world coordinates equal to the pixel coordinate. These default values can be changed before invoking `set`. Parameters ---------- axes : int or a sequence. - If an int, include the first *N* axes in their original order. - If a sequence, may contain a combination of image axis numbers (1-relative) or special axis identifiers (see below). Order is significant; ``axes[0]`` is the axis number of the input image that corresponds to the first axis in the subimage, etc. Use an axis number of 0 to create a new axis using the defaults. - If ``0``, ``[]`` or ``None``, do a deep copy. Coordinate axes types may be specified using either strings or special integer constants. 
The available types are: - ``'longitude'`` / ``WCSSUB_LONGITUDE``: Celestial longitude - ``'latitude'`` / ``WCSSUB_LATITUDE``: Celestial latitude - ``'cubeface'`` / ``WCSSUB_CUBEFACE``: Quadcube ``CUBEFACE`` axis - ``'spectral'`` / ``WCSSUB_SPECTRAL``: Spectral axis - ``'stokes'`` / ``WCSSUB_STOKES``: Stokes axis - ``'temporal'`` / ``WCSSUB_TIME``: Time axis (requires ``WCSLIB`` version 7.8 or greater) - ``'celestial'`` / ``WCSSUB_CELESTIAL``: An alias for the combination of ``'longitude'``, ``'latitude'`` and ``'cubeface'``. Returns ------- new_wcs : `~astropy.wcs.WCS` object Raises ------ MemoryError Memory allocation failed. InvalidSubimageSpecificationError Invalid subimage specification (no spectral axis). NonseparableSubimageCoordinateSystemError Non-separable subimage coordinate system. Notes ----- Combinations of subimage axes of particular types may be extracted in the same order as they occur in the input image by combining the integer constants with the 'binary or' (``|``) operator. For example:: wcs.sub([WCSSUB_LONGITUDE | WCSSUB_LATITUDE | WCSSUB_SPECTRAL]) would extract the longitude, latitude, and spectral axes in the same order as the input image. If one of each were present, the resulting object would have three dimensions. For convenience, ``WCSSUB_CELESTIAL`` is defined as the combination ``WCSSUB_LONGITUDE | WCSSUB_LATITUDE | WCSSUB_CUBEFACE``. The codes may also be negated to extract all but the types specified, for example:: wcs.sub([ WCSSUB_LONGITUDE, WCSSUB_LATITUDE, WCSSUB_CUBEFACE, -(WCSSUB_SPECTRAL | WCSSUB_STOKES)]) The last of these specifies all axis types other than spectral or Stokes. Extraction is done in the order specified by ``axes``, i.e. a longitude axis (if present) would be extracted first (via ``axes[0]``) and not subsequently (via ``axes[3]``). Likewise for the latitude and cubeface axes in this example. The number of dimensions in the returned object may be less than or greater than the length of ``axes``. However, it will never exceed the number of axes in the input image. """ tab = """ ``list of Tabprm`` Tabular coordinate objects. A list of tabular coordinate objects associated with this WCS. """ Tabprm = """ A class to store the information related to tabular coordinates, i.e., coordinates that are defined via a lookup table. This class can not be constructed directly from Python, but instead is returned from `~astropy.wcs.Wcsprm.tab`. """ theta0 = """ ``double`` The native latitude of the fiducial point. The point whose celestial coordinates are given in ``ref[1:2]``. If undefined (NaN) the initialization routine, `~astropy.wcs.Wcsprm.set`, will set this to a projection-specific default. See also -------- astropy.wcs.Wcsprm.phi0 """ to_header = """ to_header(relax=False) `to_header` translates a WCS object into a FITS header. The details of the header depend on context: - If the `~astropy.wcs.Wcsprm.colnum` member is non-zero then a binary table image array header will be produced. - Otherwise, if the `~astropy.wcs.Wcsprm.colax` member is set non-zero then a pixel list header will be produced. - Otherwise, a primary image or image extension header will be produced. The output header will almost certainly differ from the input in a number of respects: 1. The output header only contains WCS-related keywords. In particular, it does not contain syntactically-required keywords such as ``SIMPLE``, ``NAXIS``, ``BITPIX``, or ``END``. 2. Deprecated (e.g.
``CROTAn``) or non-standard usage will be translated to standard (this is partially dependent on whether ``fix`` was applied). 3. Quantities will be converted to the units used internally, basically SI with the addition of degrees. 4. Floating-point quantities may be given to a different decimal precision. 5. Elements of the ``PCi_j`` matrix will be written if and only if they differ from the unit matrix. Thus, if the matrix is unity then no elements will be written. 6. Additional keywords such as ``WCSAXES``, ``CUNITia``, ``LONPOLEa`` and ``LATPOLEa`` may appear. 7. The original keycomments will be lost, although `~astropy.wcs.Wcsprm.to_header` tries hard to write meaningful comments. 8. Keyword order may be changed. Keywords can be translated between the image array, binary table, and pixel lists forms by manipulating the `~astropy.wcs.Wcsprm.colnum` or `~astropy.wcs.Wcsprm.colax` members of the `~astropy.wcs.WCS` object. Parameters ---------- relax : bool or int Degree of permissiveness: - `False`: Recognize only FITS keywords defined by the published WCS standard. - `True`: Admit all recognized informal extensions of the WCS standard. - `int`: a bit field selecting specific extensions to write. See :ref:`astropy:relaxwrite` for details. Returns ------- header : str Raw FITS header as a string. """ ttype = """ ``str`` (read-only) ``TTYPEn`` identifying the column of the binary table that contains the wcstab array. """ unitfix = """ unitfix(translate_units='') Translates non-standard ``CUNITia`` keyvalues. For example, ``DEG`` -> ``deg``, also stripping off unnecessary whitespace. Parameters ---------- translate_units : str, optional Do potentially unsafe translations of non-standard unit strings. Although ``\"S\"`` is commonly used to represent seconds, its translation to ``\"s\"`` is potentially unsafe since the standard recognizes ``\"S\"`` formally as Siemens, however rarely that may be used. The same applies to ``\"H\"`` for hours (Henry), and ``\"D\"`` for days (Debye). This string controls what to do in such cases, and is case-insensitive. - If the string contains ``\"s\"``, translate ``\"S\"`` to ``\"s\"``. - If the string contains ``\"h\"``, translate ``\"H\"`` to ``\"h\"``. - If the string contains ``\"d\"``, translate ``\"D\"`` to ``\"d\"``. Thus ``''`` doesn't do any unsafe translations, whereas ``'shd'`` does all of them. Returns ------- success : int Returns ``0`` for success; ``-1`` if no change required. """ velangl = """ ``double`` Velocity angle. The angle in degrees that should be used to decompose an observed velocity into radial and transverse components. An undefined value is represented by NaN. """ velosys = """ ``double`` Relative radial velocity. The relative radial velocity (m/s) between the observer and the selected standard of rest in the direction of the celestial reference coordinate, ``VELOSYSa``. An undefined value is represented by NaN. See also -------- astropy.wcs.Wcsprm.specsys, astropy.wcs.Wcsprm.ssysobs """ velref = """ ``int`` AIPS velocity code. From ``VELREF`` keyword. """ wcs = """ A `~astropy.wcs.Wcsprm` object to perform the basic `wcslib`_ WCS transformation. """ Wcs = """ Wcs(*sip, cpdis, wcsprm, det2im*) Wcs objects amalgamate basic WCS (as provided by `wcslib`_), with `SIP`_ and `distortion paper`_ operations. To perform all distortion corrections and WCS transformation, use ``all_pix2world``.
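For example, a sketch using the high-level `~astropy.wcs.WCS` wrapper, which constructs and applies this pipeline (``hdulist`` is an assumed, already-opened FITS file whose header carries SIP and distortion-paper keywords)::

    from astropy.wcs import WCS
    w = WCS(hdulist[0].header, hdulist)         # fobj is needed to read lookup-table distortions
    sky = w.all_pix2world([[100.0, 100.0]], 0)  # distortion-corrected world coordinates
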
Parameters ---------- sip : `~astropy.wcs.Sip` object or None cpdis : (2,) tuple of `~astropy.wcs.DistortionLookupTable` or None wcsprm : `~astropy.wcs.Wcsprm` det2im : (2,) tuple of `~astropy.wcs.DistortionLookupTable` or None """ Wcsprm = """ Wcsprm(header=None, key=' ', relax=False, naxis=2, keysel=0, colsel=None) `~astropy.wcs.Wcsprm` performs the core WCS transformations. .. note:: The members of this object correspond roughly to the key/value pairs in the FITS header. However, they are adjusted and normalized in a number of ways that make performing the WCS transformation easier. Therefore, they can not be relied upon to get the original values in the header. For that, use `astropy.io.fits.Header` directly. The FITS header parsing enforces correct FITS "keyword = value" syntax with regard to the equals sign occurring in columns 9 and 10. However, it does recognize free-format character (NOST 100-2.0, Sect. 5.2.1), integer (Sect. 5.2.3), and floating-point values (Sect. 5.2.4) for all keywords. .. warning:: Many of the attributes of this class require additional processing when modifying the underlying C structure. When needed, this additional processing is implemented in attribute setters. Therefore, for mutable attributes, one should always set the attribute rather than a slice of its current value (or its individual elements) since the latter may leave the class instance in an invalid state. For example, attribute ``crpix`` of a 2D WCS' ``Wcsprm`` object ``wcs`` should be set as ``wcs.crpix = [crpix1, crpix2]`` instead of ``wcs.crpix[0] = crpix1; wcs.crpix[1] = crpix2``. Parameters ---------- header : `~astropy.io.fits.Header`, str, or None. If ``None``, the object will be initialized to default values. key : str, optional The key referring to a particular WCS transform in the header. This may be either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``\"a\"`` part of ``\"CTYPEia\"``. (*key* may only be provided if *header* is also provided.) relax : bool or int, optional Degree of permissiveness: - `False`: Recognize only FITS keywords defined by the published WCS standard. - `True`: Admit all recognized informal extensions of the WCS standard. - `int`: a bit field selecting specific extensions to accept. See :ref:`astropy:relaxread` for details. naxis : int, optional The number of world coordinate axes for the object. (*naxis* may only be provided if *header* is `None`.) keysel : sequence of flag bits, optional Vector of flag bits that may be used to restrict the keyword types considered: - ``WCSHDR_IMGHEAD``: Image header keywords. - ``WCSHDR_BIMGARR``: Binary table image array. - ``WCSHDR_PIXLIST``: Pixel list keywords. If zero, there is no restriction. If -1, the underlying wcslib function ``wcspih()`` is called, rather than ``wcstbh()``. colsel : sequence of int A sequence of table column numbers used to restrict the keywords considered. `None` indicates no restriction. Raises ------ MemoryError Memory allocation failed. ValueError Invalid key. KeyError Key not found in FITS header. """ wtb = """ ``list of Wtbarr`` objects to construct coordinate lookup tables from BINTABLE. """ Wtbarr = """ Class to construct coordinate lookup tables from a binary table extension (BINTABLE). This class can not be constructed directly from Python, but instead is returned from `~astropy.wcs.Wcsprm.wtb`. """ zsource = """ ``double`` The redshift, ``ZSOURCEa``, of the source. An undefined value is represented by NaN. """ WcsError = """ Base class of all invalid WCS errors.
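Since the concrete error classes documented below derive from it, catching it catches any of them; a sketch (``hdr`` is an assumed `astropy.io.fits.Header`)::

    from astropy import wcs
    try:
        prm = wcs.Wcsprm(header=hdr)  # may raise on an invalid header
        prm.set()                     # may raise on invalid transformation parameters
    except wcs.WcsError as err:
        print('WCS problem:', err)
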
""" SingularMatrix = """ SingularMatrixError() The linear transformation matrix is singular. """ InconsistentAxisTypes = """ InconsistentAxisTypesError() The WCS header inconsistent or unrecognized coordinate axis type(s). """ InvalidTransform = """ InvalidTransformError() The WCS transformation is invalid, or the transformation parameters are invalid. """ InvalidCoordinate = """ InvalidCoordinateError() One or more of the world coordinates is invalid. """ NoSolution = """ NoSolutionError() No solution can be found in the given interval. """ InvalidSubimageSpecification = """ InvalidSubimageSpecificationError() The subimage specification is invalid. """ NonseparableSubimageCoordinateSystem = """ NonseparableSubimageCoordinateSystemError() Non-separable subimage coordinate system. """ NoWcsKeywordsFound = """ NoWcsKeywordsFoundError() No WCS keywords were found in the given header. """ InvalidTabularParameters = """ InvalidTabularParametersError() The given tabular parameters are invalid. """ InvalidPrjParameters = """ InvalidPrjParametersError() The given projection parameters are invalid. """ mjdbeg = """ ``double`` Modified Julian Date corresponding to ``DATE-BEG``. ``(MJD = JD - 2400000.5)``. An undefined value is represented by NaN. See also -------- astropy.wcs.Wcsprm.mjdbeg """ mjdend = """ ``double`` Modified Julian Date corresponding to ``DATE-END``. ``(MJD = JD - 2400000.5)``. An undefined value is represented by NaN. See also -------- astropy.wcs.Wcsprm.mjdend """ mjdref = """ ``double`` Modified Julian Date corresponding to ``DATE-REF``. ``(MJD = JD - 2400000.5)``. An undefined value is represented by NaN. See also -------- astropy.wcs.Wcsprm.dateref """ bepoch = """ ``double`` Equivalent to ``DATE-OBS``. Expressed as a Besselian epoch. See also -------- astropy.wcs.Wcsprm.dateobs """ jepoch = """ ``double`` Equivalent to ``DATE-OBS``. Expressed as a Julian epoch. See also -------- astropy.wcs.Wcsprm.dateobs """ datebeg = """ ``string`` Date at the start of the observation. In ISO format, ``yyyy-mm-ddThh:mm:ss``. See also -------- astropy.wcs.Wcsprm.datebeg """ dateend = """ ``string`` Date at the end of the observation. In ISO format, ``yyyy-mm-ddThh:mm:ss``. See also -------- astropy.wcs.Wcsprm.dateend """ dateref = """ ``string`` Date of a reference epoch relative to which other time measurements refer. See also -------- astropy.wcs.Wcsprm.dateref """ timesys = """ ``string`` Time scale (UTC, TAI, etc.) in which all other time-related auxiliary header values are recorded. Also defines the time scale for an image axis with CTYPEia set to 'TIME'. See also -------- astropy.wcs.Wcsprm.timesys """ trefpos = """ ``string`` Location in space where the recorded time is valid. See also -------- astropy.wcs.Wcsprm.trefpos """ trefdir = """ ``string`` Reference direction used in calculating a pathlength delay. See also -------- astropy.wcs.Wcsprm.trefdir """ timeunit = """ ``string`` Time units in which the following header values are expressed: ``TSTART``, ``TSTOP``, ``TIMEOFFS``, ``TIMSYER``, ``TIMRDER``, ``TIMEDEL``. It also provides the default value for ``CUNITia`` for time axes. See also -------- astropy.wcs.Wcsprm.trefdir """ plephem = """ ``string`` The Solar System ephemeris used for calculating a pathlength delay. See also -------- astropy.wcs.Wcsprm.plephem """ tstart = """ ``double`` equivalent to DATE-BEG expressed as a time in units of TIMEUNIT relative to DATEREF+TIMEOFFS. 
See also
--------
astropy.wcs.Wcsprm.tstop
"""

tstop = """
``double`` equivalent to DATE-END expressed as a time in units of
TIMEUNIT relative to DATEREF+TIMEOFFS.

See also
--------
astropy.wcs.Wcsprm.tstart
"""

telapse = """
``double`` equivalent to the elapsed time between DATE-BEG and
DATE-END, in units of TIMEUNIT.

See also
--------
astropy.wcs.Wcsprm.tstart
"""

timeoffs = """
``double`` Time offset, which may be used, for example, to provide a
uniform clock correction for times referenced to DATEREF.

See also
--------
astropy.wcs.Wcsprm.timeoffs
"""

timsyer = """
``double`` the absolute error of the time values, in units of TIMEUNIT.

See also
--------
astropy.wcs.Wcsprm.timrder
"""

timrder = """
``double`` the accuracy of time stamps relative to each other, in
units of TIMEUNIT.

See also
--------
astropy.wcs.Wcsprm.timsyer
"""

timedel = """
``double`` the resolution of the time stamps.

See also
--------
astropy.wcs.Wcsprm.timedel
"""

timepixr = """
``double`` relative position of the time stamps in binned time
intervals, a value between 0.0 and 1.0.

See also
--------
astropy.wcs.Wcsprm.timepixr
"""

obsorbit = """
``string`` URI, URL, or name of an orbit ephemeris file giving
spacecraft coordinates relating to TREFPOS.

See also
--------
astropy.wcs.Wcsprm.trefpos
"""

xposure = """
``double`` effective exposure time in units of TIMEUNIT.

See also
--------
astropy.wcs.Wcsprm.timeunit
"""

czphs = """
``double array[naxis]`` The time at the zero point of a phase axis,
``CZPHSia``.

An undefined value is represented by NaN.
"""

cperi = """
``double array[naxis]`` The period of a phase axis, ``CPERIia``.

An undefined value is represented by NaN.
"""
c3774b87970dc1c5e6082dee868393c482d84a8c530597b3b1c24d53a27161a0
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ The astropy.time package provides functionality for manipulating times and dates. Specific emphasis is placed on supporting time scales (e.g. UTC, TAI, UT1) and time representations (e.g. JD, MJD, ISO 8601) that are used in astronomy. """ import os import copy import enum import operator import threading from datetime import datetime, date, timedelta from time import strftime from warnings import warn import numpy as np import erfa from astropy import units as u, constants as const from astropy.units import UnitConversionError from astropy.utils import ShapedLikeNDArray from astropy.utils.compat.misc import override__dir__ from astropy.utils.data_info import MixinInfo, data_info_factory from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning from .utils import day_frac from .formats import (TIME_FORMATS, TIME_DELTA_FORMATS, TimeJD, TimeUnique, TimeAstropyTime, TimeDatetime) # Import TimeFromEpoch to avoid breaking code that followed the old example of # making a custom timescale in the documentation. from .formats import TimeFromEpoch # noqa from .time_helper.function_helpers import CUSTOM_FUNCTIONS, UNSUPPORTED_FUNCTIONS from astropy.extern import _strptime __all__ = ['TimeBase', 'Time', 'TimeDelta', 'TimeInfo', 'TimeInfoBase', 'update_leap_seconds', 'TIME_SCALES', 'STANDARD_TIME_SCALES', 'TIME_DELTA_SCALES', 'ScaleValueError', 'OperandTypeError', 'TimeDeltaMissingUnitWarning'] STANDARD_TIME_SCALES = ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc') LOCAL_SCALES = ('local',) TIME_TYPES = dict((scale, scales) for scales in (STANDARD_TIME_SCALES, LOCAL_SCALES) for scale in scales) TIME_SCALES = STANDARD_TIME_SCALES + LOCAL_SCALES MULTI_HOPS = {('tai', 'tcb'): ('tt', 'tdb'), ('tai', 'tcg'): ('tt',), ('tai', 'ut1'): ('utc',), ('tai', 'tdb'): ('tt',), ('tcb', 'tcg'): ('tdb', 'tt'), ('tcb', 'tt'): ('tdb',), ('tcb', 'ut1'): ('tdb', 'tt', 'tai', 'utc'), ('tcb', 'utc'): ('tdb', 'tt', 'tai'), ('tcg', 'tdb'): ('tt',), ('tcg', 'ut1'): ('tt', 'tai', 'utc'), ('tcg', 'utc'): ('tt', 'tai'), ('tdb', 'ut1'): ('tt', 'tai', 'utc'), ('tdb', 'utc'): ('tt', 'tai'), ('tt', 'ut1'): ('tai', 'utc'), ('tt', 'utc'): ('tai',), } GEOCENTRIC_SCALES = ('tai', 'tt', 'tcg') BARYCENTRIC_SCALES = ('tcb', 'tdb') ROTATIONAL_SCALES = ('ut1',) TIME_DELTA_TYPES = dict((scale, scales) for scales in (GEOCENTRIC_SCALES, BARYCENTRIC_SCALES, ROTATIONAL_SCALES, LOCAL_SCALES) for scale in scales) TIME_DELTA_SCALES = GEOCENTRIC_SCALES + BARYCENTRIC_SCALES + ROTATIONAL_SCALES + LOCAL_SCALES # For time scale changes, we need L_G and L_B, which are stored in erfam.h as # /* L_G = 1 - d(TT)/d(TCG) */ # define ERFA_ELG (6.969290134e-10) # /* L_B = 1 - d(TDB)/d(TCB), and TDB (s) at TAI 1977/1/1.0 */ # define ERFA_ELB (1.550519768e-8) # These are exposed in erfa as erfa.ELG and erfa.ELB. # Implied: d(TT)/d(TCG) = 1-L_G # and d(TCG)/d(TT) = 1/(1-L_G) = 1 + (1-(1-L_G))/(1-L_G) = 1 + L_G/(1-L_G) # scale offsets as second = first + first * scale_offset[(first,second)] SCALE_OFFSETS = {('tt', 'tai'): None, ('tai', 'tt'): None, ('tcg', 'tt'): -erfa.ELG, ('tt', 'tcg'): erfa.ELG / (1. - erfa.ELG), ('tcg', 'tai'): -erfa.ELG, ('tai', 'tcg'): erfa.ELG / (1. - erfa.ELG), ('tcb', 'tdb'): -erfa.ELB, ('tdb', 'tcb'): erfa.ELB / (1. - erfa.ELB)} # triple-level dictionary, yay! 
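# For example, SIDEREAL_TIME_MODELS['mean']['IAU2006'] selects erfa.gmst06,
# which Time._sid_time_or_earth_rot_ang evaluates (roughly) as
# gmst06(ut1.jd1, ut1.jd2, tt.jd1, tt.jd2), per the 'scales' entry below.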
SIDEREAL_TIME_MODELS = { 'mean': { 'IAU2006': {'function': erfa.gmst06, 'scales': ('ut1', 'tt')}, 'IAU2000': {'function': erfa.gmst00, 'scales': ('ut1', 'tt')}, 'IAU1982': {'function': erfa.gmst82, 'scales': ('ut1',), 'include_tio': False} }, 'apparent': { 'IAU2006A': {'function': erfa.gst06a, 'scales': ('ut1', 'tt')}, 'IAU2000A': {'function': erfa.gst00a, 'scales': ('ut1', 'tt')}, 'IAU2000B': {'function': erfa.gst00b, 'scales': ('ut1',)}, 'IAU1994': {'function': erfa.gst94, 'scales': ('ut1',), 'include_tio': False} }} class _LeapSecondsCheck(enum.Enum): NOT_STARTED = 0 # No thread has reached the check RUNNING = 1 # A thread is running update_leap_seconds (_LEAP_SECONDS_LOCK is held) DONE = 2 # update_leap_seconds has completed _LEAP_SECONDS_CHECK = _LeapSecondsCheck.NOT_STARTED _LEAP_SECONDS_LOCK = threading.RLock() class TimeInfoBase(MixinInfo): """ Container for meta information like name, description, format. This is required when the object is used as a mixin column within a table, but can be used as a general way to store meta information. This base class is common between TimeInfo and TimeDeltaInfo. """ attr_names = MixinInfo.attr_names | {'serialize_method'} _supports_indexing = True # The usual tuple of attributes needed for serialization is replaced # by a property, since Time can be serialized different ways. _represent_as_dict_extra_attrs = ('format', 'scale', 'precision', 'in_subfmt', 'out_subfmt', 'location', '_delta_ut1_utc', '_delta_tdb_tt') # When serializing, write out the `value` attribute using the column name. _represent_as_dict_primary_data = 'value' mask_val = np.ma.masked @property def _represent_as_dict_attrs(self): method = self.serialize_method[self._serialize_context] if method == 'formatted_value': out = ('value',) elif method == 'jd1_jd2': out = ('jd1', 'jd2') else: raise ValueError("serialize method must be 'formatted_value' or 'jd1_jd2'") return out + self._represent_as_dict_extra_attrs def __init__(self, bound=False): super().__init__(bound) # If bound to a data object instance then create the dict of attributes # which stores the info attribute values. if bound: # Specify how to serialize this object depending on context. # If ``True`` for a context, then use formatted ``value`` attribute # (e.g. the ISO time string). If ``False`` then use float jd1 and jd2. self.serialize_method = {'fits': 'jd1_jd2', 'ecsv': 'formatted_value', 'hdf5': 'jd1_jd2', 'yaml': 'jd1_jd2', 'parquet': 'jd1_jd2', None: 'jd1_jd2'} def get_sortable_arrays(self): """ Return a list of arrays which can be lexically sorted to represent the order of the parent column. 
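        The full time is split into an approximate JD plus a small
        remainder carried at full precision, so lexically sorting on the
        pair reproduces the exact order of the underlying ``jd1``/``jd2``
        double-double representation.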
Returns ------- arrays : list of ndarray """ parent = self._parent jd_approx = parent.jd jd_remainder = (parent - parent.__class__(jd_approx, format='jd')).jd return [jd_approx, jd_remainder] @property def unit(self): return None info_summary_stats = staticmethod( data_info_factory(names=MixinInfo._stats, funcs=[getattr(np, stat) for stat in MixinInfo._stats])) # When Time has mean, std, min, max methods: # funcs = [lambda x: getattr(x, stat)() for stat_name in MixinInfo._stats]) def _construct_from_dict(self, map): if 'jd1' in map and 'jd2' in map: # Initialize as JD but revert to desired format and out_subfmt (if needed) format = map.pop('format') out_subfmt = map.pop('out_subfmt', None) map['format'] = 'jd' map['val'] = map.pop('jd1') map['val2'] = map.pop('jd2') out = self._parent_cls(**map) out.format = format if out_subfmt is not None: out.out_subfmt = out_subfmt else: map['val'] = map.pop('value') out = self._parent_cls(**map) return out def new_like(self, cols, length, metadata_conflicts='warn', name=None): """ Return a new Time instance which is consistent with the input Time objects ``cols`` and has ``length`` rows. This is intended for creating an empty Time instance whose elements can be set in-place for table operations like join or vstack. It checks that the input locations and attributes are consistent. This is used when a Time object is used as a mixin column in an astropy Table. Parameters ---------- cols : list List of input columns (Time objects) length : int Length of the output column object metadata_conflicts : str ('warn'|'error'|'silent') How to handle metadata conflicts name : str Output column name Returns ------- col : Time (or subclass) Empty instance of this class consistent with ``cols`` """ # Get merged info attributes like shape, dtype, format, description, etc. attrs = self.merge_cols_attributes(cols, metadata_conflicts, name, ('meta', 'description')) attrs.pop('dtype') # Not relevant for Time col0 = cols[0] # Check that location is consistent for all Time objects for col in cols[1:]: # This is the method used by __setitem__ to ensure that the right side # has a consistent location (and coerce data if necessary, but that does # not happen in this case since `col` is already a Time object). If this # passes then any subsequent table operations via setitem will work. try: col0._make_value_equivalent(slice(None), col) except ValueError: raise ValueError('input columns have inconsistent locations') # Make a new Time object with the desired shape and attributes shape = (length,) + attrs.pop('shape') jd2000 = 2451544.5 # Arbitrary JD value J2000.0 that will work with ERFA jd1 = np.full(shape, jd2000, dtype='f8') jd2 = np.zeros(shape, dtype='f8') tm_attrs = {attr: getattr(col0, attr) for attr in ('scale', 'location', 'precision', 'in_subfmt', 'out_subfmt')} out = self._parent_cls(jd1, jd2, format='jd', **tm_attrs) out.format = col0.format # Set remaining info attributes for attr, value in attrs.items(): setattr(out.info, attr, value) return out class TimeInfo(TimeInfoBase): """ Container for meta information like name, description, format. This is required when the object is used as a mixin column within a table, but can be used as a general way to store meta information. """ def _represent_as_dict(self, attrs=None): """Get the values for the parent ``attrs`` and return as a dict. By default, uses '_represent_as_dict_attrs'. """ map = super()._represent_as_dict(attrs=attrs) # TODO: refactor these special cases into the TimeFormat classes? 
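        # For orientation, a serialized Time column looks roughly like
        # (a sketch, not the exact output):
        #   {'value': ..., 'format': 'isot', 'scale': 'utc', 'precision': 3,
        #    'in_subfmt': '*', 'out_subfmt': '*', 'location': None}
        # with 'jd1'/'jd2' replacing 'value' for the 'jd1_jd2' method.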
# The datetime64 format requires special handling for ECSV (see #12840). # The `value` has numpy dtype datetime64 but this is not an allowed # datatype for ECSV. Instead convert to a string representation. if (self._serialize_context == 'ecsv' and map['format'] == 'datetime64' and 'value' in map): map['value'] = map['value'].astype('U') # The datetime format is serialized as ISO with no loss of precision. if map['format'] == 'datetime' and 'value' in map: map['value'] = np.vectorize(lambda x: x.isoformat())(map['value']) return map def _construct_from_dict(self, map): # See comment above. May need to convert string back to datetime64. # Note that _serialize_context is not set here so we just look for the # string value directly. if (map['format'] == 'datetime64' and 'value' in map and map['value'].dtype.kind == 'U'): map['value'] = map['value'].astype('datetime64') # Convert back to datetime objects for datetime format. if map['format'] == 'datetime' and 'value' in map: from datetime import datetime map['value'] = np.vectorize(datetime.fromisoformat)(map['value']) delta_ut1_utc = map.pop('_delta_ut1_utc', None) delta_tdb_tt = map.pop('_delta_tdb_tt', None) out = super()._construct_from_dict(map) if delta_ut1_utc is not None: out._delta_ut1_utc = delta_ut1_utc if delta_tdb_tt is not None: out._delta_tdb_tt = delta_tdb_tt return out class TimeDeltaInfo(TimeInfoBase): """ Container for meta information like name, description, format. This is required when the object is used as a mixin column within a table, but can be used as a general way to store meta information. """ _represent_as_dict_extra_attrs = ('format', 'scale') def new_like(self, cols, length, metadata_conflicts='warn', name=None): """ Return a new TimeDelta instance which is consistent with the input Time objects ``cols`` and has ``length`` rows. This is intended for creating an empty Time instance whose elements can be set in-place for table operations like join or vstack. It checks that the input locations and attributes are consistent. This is used when a Time object is used as a mixin column in an astropy Table. Parameters ---------- cols : list List of input columns (Time objects) length : int Length of the output column object metadata_conflicts : str ('warn'|'error'|'silent') How to handle metadata conflicts name : str Output column name Returns ------- col : Time (or subclass) Empty instance of this class consistent with ``cols`` """ # Get merged info attributes like shape, dtype, format, description, etc. attrs = self.merge_cols_attributes(cols, metadata_conflicts, name, ('meta', 'description')) attrs.pop('dtype') # Not relevant for Time col0 = cols[0] # Make a new Time object with the desired shape and attributes shape = (length,) + attrs.pop('shape') jd1 = np.zeros(shape, dtype='f8') jd2 = np.zeros(shape, dtype='f8') out = self._parent_cls(jd1, jd2, format='jd', scale=col0.scale) out.format = col0.format # Set remaining info attributes for attr, value in attrs.items(): setattr(out.info, attr, value) return out class TimeBase(ShapedLikeNDArray): """Base time class from which Time and TimeDelta inherit.""" # Make sure that reverse arithmetic (e.g., TimeDelta.__rmul__) # gets called over the __mul__ of Numpy arrays. __array_priority__ = 20000 # Declare that Time can be used as a Table column by defining the # attribute where column attributes will be stored. 
    _astropy_column_attrs = None

    def __getnewargs__(self):
        return (self._time,)

    def _init_from_vals(self, val, val2, format, scale, copy,
                        precision=None, in_subfmt=None, out_subfmt=None):
        """
        Set the internal _format, scale, and _time attrs from user
        inputs.  This handles coercion into the correct shapes and
        some basic input validation.
        """
        if precision is None:
            precision = 3
        if in_subfmt is None:
            in_subfmt = '*'
        if out_subfmt is None:
            out_subfmt = '*'

        # Coerce val into an array
        val = _make_array(val, copy)

        # If val2 is not None, ensure consistency
        if val2 is not None:
            val2 = _make_array(val2, copy)
            try:
                np.broadcast(val, val2)
            except ValueError:
                raise ValueError('Input val and val2 have inconsistent shape; '
                                 'they cannot be broadcast together.')

        if scale is not None:
            if not (isinstance(scale, str) and scale.lower() in self.SCALES):
                raise ScaleValueError("Scale {!r} is not in the allowed scales "
                                      "{}".format(scale, sorted(self.SCALES)))

        # If either of the input val, val2 are masked arrays then
        # find the masked elements and fill them.
        mask, val, val2 = _check_for_masked_and_fill(val, val2)

        # Parse / convert input values into internal jd1, jd2 based on format
        self._time = self._get_time_fmt(val, val2, format, scale,
                                        precision, in_subfmt, out_subfmt)
        self._format = self._time.name

        # Hack from #9969 to allow passing the location value that has been
        # collected by the TimeAstropyTime format class up to the Time level.
        # TODO: find a nicer way.
        if hasattr(self._time, '_location'):
            self.location = self._time._location
            del self._time._location

        # If any inputs were masked then mask jd2 accordingly.  From above
        # routine ``mask`` must be either Python bool False or a bool ndarray
        # with shape broadcastable to jd2.
        if mask is not False:
            mask = np.broadcast_to(mask, self._time.jd2.shape)
            self._time.jd1[mask] = 2451544.5  # Set to JD for 2000-01-01
            self._time.jd2[mask] = np.nan

    def _get_time_fmt(self, val, val2, format, scale,
                      precision, in_subfmt, out_subfmt):
        """
        Given the supplied val, val2, format and scale try to instantiate
        the corresponding TimeFormat class to convert the input values into
        the internal jd1 and jd2.

        If format is `None` and the input is a string-type or object array
        then guess available formats and stop when one matches.
        """
        if (format is None
                and (val.dtype.kind in ('S', 'U', 'O', 'M') or val.dtype.names)):
            # Input is a string, object, datetime, or a table-like ndarray
            # (structured array, recarray).  These input types can be
            # uniquely identified by the format classes.
            formats = [(name, cls) for name, cls in self.FORMATS.items()
                       if issubclass(cls, TimeUnique)]

            # AstropyTime is a pseudo-format that isn't in the TIME_FORMATS registry,
            # but try to guess it at the end.
            formats.append(('astropy_time', TimeAstropyTime))

        elif not (isinstance(format, str) and format.lower() in self.FORMATS):
            if format is None:
                raise ValueError("No time format was given, and the input is "
                                 "not unique")
            else:
                raise ValueError("Format {!r} is not one of the allowed "
                                 "formats {}".format(format, sorted(self.FORMATS)))
        else:
            formats = [(format, self.FORMATS[format])]

        assert formats
        problems = {}
        for name, cls in formats:
            try:
                return cls(val, val2, scale, precision, in_subfmt, out_subfmt)
            except UnitConversionError:
                raise
            except (ValueError, TypeError) as err:
                # If ``format`` specified then there is only one possibility, so raise
                # immediately and include the upstream exception message to make it
                # easier for user to see what is wrong.
if len(formats) == 1: raise ValueError( f'Input values did not match the format class {format}:' + os.linesep + f'{err.__class__.__name__}: {err}' ) from err else: problems[name] = err else: raise ValueError(f'Input values did not match any of the formats ' f'where the format keyword is optional: ' f'{problems}') from problems[formats[0][0]] @property def writeable(self): return self._time.jd1.flags.writeable & self._time.jd2.flags.writeable @writeable.setter def writeable(self, value): self._time.jd1.flags.writeable = value self._time.jd2.flags.writeable = value @property def format(self): """ Get or set time format. The format defines the way times are represented when accessed via the ``.value`` attribute. By default it is the same as the format used for initializing the `Time` instance, but it can be set to any other value that could be used for initialization. These can be listed with:: >>> list(Time.FORMATS) ['jd', 'mjd', 'decimalyear', 'unix', 'unix_tai', 'cxcsec', 'gps', 'plot_date', 'stardate', 'datetime', 'ymdhms', 'iso', 'isot', 'yday', 'datetime64', 'fits', 'byear', 'jyear', 'byear_str', 'jyear_str'] """ return self._format @format.setter def format(self, format): """Set time format""" if format not in self.FORMATS: raise ValueError(f'format must be one of {list(self.FORMATS)}') format_cls = self.FORMATS[format] # Get the new TimeFormat object to contain time in new format. Possibly # coerce in/out_subfmt to '*' (default) if existing subfmt values are # not valid in the new format. self._time = format_cls( self._time.jd1, self._time.jd2, self._time._scale, self.precision, in_subfmt=format_cls._get_allowed_subfmt(self.in_subfmt), out_subfmt=format_cls._get_allowed_subfmt(self.out_subfmt), from_jd=True) self._format = format def __repr__(self): return ("<{} object: scale='{}' format='{}' value={}>" .format(self.__class__.__name__, self.scale, self.format, getattr(self, self.format))) def __str__(self): return str(getattr(self, self.format)) def __hash__(self): try: loc = getattr(self, 'location', None) if loc is not None: loc = loc.x.to_value(u.m), loc.y.to_value(u.m), loc.z.to_value(u.m) return hash((self.jd1, self.jd2, self.scale, loc)) except TypeError: if self.ndim != 0: reason = '(must be scalar)' elif self.masked: reason = '(value is masked)' else: raise raise TypeError(f"unhashable type: '{self.__class__.__name__}' {reason}") @property def scale(self): """Time scale""" return self._time.scale def _set_scale(self, scale): """ This is the key routine that actually does time scale conversions. This is not public and not connected to the read-only scale property. """ if scale == self.scale: return if scale not in self.SCALES: raise ValueError("Scale {!r} is not in the allowed scales {}" .format(scale, sorted(self.SCALES))) if scale == 'utc' or self.scale == 'utc': # If doing a transform involving UTC then check that the leap # seconds table is up to date. _check_leapsec() # Determine the chain of scale transformations to get from the current # scale to the new scale. MULTI_HOPS contains a dict of all # transformations (xforms) that require intermediate xforms. # The MULTI_HOPS dict is keyed by (sys1, sys2) in alphabetical order. xform = (self.scale, scale) xform_sort = tuple(sorted(xform)) multi = MULTI_HOPS.get(xform_sort, ()) xforms = xform_sort[:1] + multi + xform_sort[-1:] # If we made the reverse xform then reverse it now. if xform_sort != xform: xforms = tuple(reversed(xforms)) # Transform the jd1,2 pairs through the chain of scale xforms. 
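        # For example, 'tcb' -> 'ut1' walks the chain ('tcb', 'tdb', 'tt',
        # 'tai', 'utc', 'ut1') from MULTI_HOPS, calling erfa.tcbtdb,
        # erfa.tdbtt, erfa.tttai, erfa.taiutc and erfa.utcut1 in turn.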
jd1, jd2 = self._time.jd1, self._time.jd2_filled for sys1, sys2 in zip(xforms[:-1], xforms[1:]): # Some xforms require an additional delta_ argument that is # provided through Time methods. These values may be supplied by # the user or computed based on available approximations. The # get_delta_ methods are available for only one combination of # sys1, sys2 though the property applies for both xform directions. args = [jd1, jd2] for sys12 in ((sys1, sys2), (sys2, sys1)): dt_method = '_get_delta_{}_{}'.format(*sys12) try: get_dt = getattr(self, dt_method) except AttributeError: pass else: args.append(get_dt(jd1, jd2)) break conv_func = getattr(erfa, sys1 + sys2) jd1, jd2 = conv_func(*args) jd1, jd2 = day_frac(jd1, jd2) if self.masked: jd2[self.mask] = np.nan self._time = self.FORMATS[self.format](jd1, jd2, scale, self.precision, self.in_subfmt, self.out_subfmt, from_jd=True) @property def precision(self): """ Decimal precision when outputting seconds as floating point (int value between 0 and 9 inclusive). """ return self._time.precision @precision.setter def precision(self, val): del self.cache if not isinstance(val, int) or val < 0 or val > 9: raise ValueError('precision attribute must be an int between ' '0 and 9') self._time.precision = val @property def in_subfmt(self): """ Unix wildcard pattern to select subformats for parsing string input times. """ return self._time.in_subfmt @in_subfmt.setter def in_subfmt(self, val): self._time.in_subfmt = val del self.cache @property def out_subfmt(self): """ Unix wildcard pattern to select subformats for outputting times. """ return self._time.out_subfmt @out_subfmt.setter def out_subfmt(self, val): # Setting the out_subfmt property here does validation of ``val`` self._time.out_subfmt = val del self.cache @property def shape(self): """The shape of the time instances. Like `~numpy.ndarray.shape`, can be set to a new shape by assigning a tuple. Note that if different instances share some but not all underlying data, setting the shape of one instance can make the other instance unusable. Hence, it is strongly recommended to get new, reshaped instances with the ``reshape`` method. Raises ------ ValueError If the new shape has the wrong total number of elements. AttributeError If the shape of the ``jd1``, ``jd2``, ``location``, ``delta_ut1_utc``, or ``delta_tdb_tt`` attributes cannot be changed without the arrays being copied. For these cases, use the `Time.reshape` method (which copies any arrays that cannot be reshaped in-place). """ return self._time.jd1.shape @shape.setter def shape(self, shape): del self.cache # We have to keep track of arrays that were already reshaped, # since we may have to return those to their original shape if a later # shape-setting fails. reshaped = [] oldshape = self.shape # In-place reshape of data/attributes. Need to access _time.jd1/2 not # self.jd1/2 because the latter are not guaranteed to be the actual # data, and in fact should not be directly changeable from the public # API. 
        for obj, attr in ((self._time, 'jd1'),
                          (self._time, 'jd2'),
                          (self, '_delta_ut1_utc'),
                          (self, '_delta_tdb_tt'),
                          (self, 'location')):
            val = getattr(obj, attr, None)
            if val is not None and val.size > 1:
                try:
                    val.shape = shape
                except Exception:
                    for val2 in reshaped:
                        val2.shape = oldshape
                    raise
                else:
                    reshaped.append(val)

    def _shaped_like_input(self, value):
        if self._time.jd1.shape:
            if isinstance(value, np.ndarray):
                return value
            else:
                raise TypeError(
                    f"JD is an array ({self._time.jd1!r}) but value "
                    f"is not ({value!r})")
        else:
            # zero-dimensional array, is it safe to unbox?
            if (isinstance(value, np.ndarray)
                    and not value.shape
                    and not np.ma.is_masked(value)):
                if value.dtype.kind == 'M':
                    # existing test doesn't want datetime64 converted
                    return value[()]
                elif value.dtype.fields:
                    # Unpack but keep field names; .item() doesn't
                    # Still don't get python types in the fields
                    return value[()]
                else:
                    return value.item()
            else:
                return value

    @property
    def jd1(self):
        """
        First of the two doubles that internally store time value(s) in JD.
        """
        jd1 = self._time.mask_if_needed(self._time.jd1)
        return self._shaped_like_input(jd1)

    @property
    def jd2(self):
        """
        Second of the two doubles that internally store time value(s) in JD.
        """
        jd2 = self._time.mask_if_needed(self._time.jd2)
        return self._shaped_like_input(jd2)

    def to_value(self, format, subfmt='*'):
        """Get time values expressed in specified output format.

        This method allows representing the ``Time`` object in the desired
        output ``format`` and optional sub-format ``subfmt``.  Available
        built-in formats include ``jd``, ``mjd``, ``iso``, and so forth.
        Each format can have its own sub-formats.

        For built-in numerical formats like ``jd`` or ``unix``, ``subfmt``
        can be one of 'float', 'long', 'decimal', 'str', or 'bytes'.
        Here, 'long' uses ``numpy.longdouble`` for somewhat enhanced
        precision (with the enhancement depending on platform), and
        'decimal' :class:`decimal.Decimal` for full precision.  For 'str'
        and 'bytes', the number of digits is also chosen such that time
        values are represented accurately.

        For built-in date-like string formats, one of 'date_hms',
        'date_hm', or 'date' (or 'longdate_hms', etc., for 5-digit years
        in `~astropy.time.TimeFITS`).  For sub-formats including seconds,
        the number of digits used for the fractional seconds is as set by
        `~astropy.time.Time.precision`.

        Parameters
        ----------
        format : str
            The format in which one wants the time values.  Default: the
            current format.
        subfmt : str or None, optional
            Value or wildcard pattern to select the sub-format in which
            the values should be given.  The default of '*' picks the
            first available for a given format, i.e., 'float' or
            'date_hms'.  If `None`, use the instance's ``out_subfmt``.
        """
        # TODO: add a precision argument (but ensure it is keyword argument
        # only, to make life easier for TimeDelta.to_value()).
        if format not in self.FORMATS:
            raise ValueError(f'format must be one of {list(self.FORMATS)}')

        cache = self.cache['format']
        # Try to keep cache behaviour like it was in astropy < 4.0.
        key = format if subfmt is None else (format, subfmt)

        if key not in cache:
            if format == self.format:
                tm = self
            else:
                tm = self.replicate(format=format)

            # Some TimeFormat subclasses may not be able to handle being
            # passed an out_subfmt.  This includes some core classes like
            # TimeBesselianEpochString that do not have any allowed subfmts.
            # But those do deal with `self.out_subfmt` internally, so if
            # subfmt is the same, we do not pass it on.
kwargs = {} if subfmt is not None and subfmt != tm.out_subfmt: kwargs['out_subfmt'] = subfmt try: value = tm._time.to_value(parent=tm, **kwargs) except TypeError as exc: # Try validating subfmt, e.g. for formats like 'jyear_str' that # do not implement out_subfmt in to_value() (because there are # no allowed subformats). If subfmt is not valid this gives the # same exception as would have occurred if the call to # `to_value()` had succeeded. tm._time._select_subfmts(subfmt) # Subfmt was valid, so fall back to the original exception to see # if it was lack of support for out_subfmt as a call arg. if "unexpected keyword argument 'out_subfmt'" in str(exc): raise ValueError( f"to_value() method for format {format!r} does not " f"support passing a 'subfmt' argument") from None else: # Some unforeseen exception so raise. raise value = tm._shaped_like_input(value) cache[key] = value return cache[key] @property def value(self): """Time value(s) in current format""" return self.to_value(self.format, None) @property def masked(self): return self._time.masked @property def mask(self): return self._time.mask def insert(self, obj, values, axis=0): """ Insert values before the given indices in the column and return a new `~astropy.time.Time` or `~astropy.time.TimeDelta` object. The values to be inserted must conform to the rules for in-place setting of ``Time`` objects (see ``Get and set values`` in the ``Time`` documentation). The API signature matches the ``np.insert`` API, but is more limited. The specification of insert index ``obj`` must be a single integer, and the ``axis`` must be ``0`` for simple row insertion before the index. Parameters ---------- obj : int Integer index before which ``values`` is inserted. values : array-like Value(s) to insert. If the type of ``values`` is different from that of quantity, ``values`` is converted to the matching type. axis : int, optional Axis along which to insert ``values``. Default is 0, which is the only allowed value and will insert a row. Returns ------- out : `~astropy.time.Time` subclass New time object with inserted value(s) """ # Validate inputs: obj arg is integer, axis=0, self is not a scalar, and # input index is in bounds. try: idx0 = operator.index(obj) except TypeError: raise TypeError('obj arg must be an integer') if axis != 0: raise ValueError('axis must be 0') if not self.shape: raise TypeError('cannot insert into scalar {} object' .format(self.__class__.__name__)) if abs(idx0) > len(self): raise IndexError('index {} is out of bounds for axis 0 with size {}' .format(idx0, len(self))) # Turn negative index into positive if idx0 < 0: idx0 = len(self) + idx0 # For non-Time object, use numpy to help figure out the length. (Note annoying # case of a string input that has a length which is not the length we want). if not isinstance(values, self.__class__): values = np.asarray(values) n_values = len(values) if values.shape else 1 # Finally make the new object with the correct length and set values for the # three sections, before insert, the insert, and after the insert. out = self.__class__.info.new_like([self], len(self) + n_values, name=self.info.name) out._time.jd1[:idx0] = self._time.jd1[:idx0] out._time.jd2[:idx0] = self._time.jd2[:idx0] # This uses the Time setting machinery to coerce and validate as necessary. 
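        # (e.g. inserting a bare ISO string into a UTC Time parses and
        # scale-converts it here; incompatible values raise ValueError).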
        out[idx0:idx0 + n_values] = values
        out._time.jd1[idx0 + n_values:] = self._time.jd1[idx0:]
        out._time.jd2[idx0 + n_values:] = self._time.jd2[idx0:]

        return out

    def __setitem__(self, item, value):
        if not self.writeable:
            if self.shape:
                raise ValueError('{} object is read-only. Make a '
                                 'copy() or set "writeable" attribute to True.'
                                 .format(self.__class__.__name__))
            else:
                raise ValueError('scalar {} object is read-only.'
                                 .format(self.__class__.__name__))

        # Any use of setitem results in immediate cache invalidation
        del self.cache

        # Setting invalidates transform deltas
        for attr in ('_delta_tdb_tt', '_delta_ut1_utc'):
            if hasattr(self, attr):
                delattr(self, attr)

        if value is np.ma.masked or value is np.nan:
            self._time.jd2[item] = np.nan
            return

        value = self._make_value_equivalent(item, value)

        # Finally directly set the jd1/2 values.  Locations are known to match.
        if self.scale is not None:
            value = getattr(value, self.scale)
        self._time.jd1[item] = value._time.jd1
        self._time.jd2[item] = value._time.jd2

    def isclose(self, other, atol=None):
        """Returns a boolean or boolean array where two Time objects are
        element-wise equal within a time tolerance.

        This evaluates the expression below::

          abs(self - other) <= atol

        Parameters
        ----------
        other : `~astropy.time.Time`
            Time object for comparison.
        atol : `~astropy.units.Quantity` or `~astropy.time.TimeDelta`
            Absolute tolerance for equality with units of time (e.g. ``u.s``
            or ``u.day``).  Default is two bits in the 128-bit JD time
            representation, equivalent to about 40 picosecs.
        """
        if atol is None:
            # Note: use 2 bits instead of 1 bit based on experience in precision
            # tests, since taking the difference with a UTC time means one has
            # to do a scale change.
            atol = 2 * np.finfo(float).eps * u.day

        if not isinstance(atol, (u.Quantity, TimeDelta)):
            raise TypeError("'atol' argument must be a Quantity or TimeDelta instance, got "
                            f'{atol.__class__.__name__} instead')

        try:
            # Separate these out so user sees where the problem is
            dt = self - other
            dt = abs(dt)
            out = dt <= atol
        except Exception as err:
            raise TypeError("'other' argument must support subtraction with Time "
                            f"and return a value that supports comparison with "
                            f"{atol.__class__.__name__}: {err}")

        return out

    def copy(self, format=None):
        """
        Return a fully independent copy of the Time object, optionally
        changing the format.

        If ``format`` is supplied then the time format of the returned Time
        object will be set accordingly, otherwise it will be unchanged from
        the original.

        In this method a full copy of the internal time arrays will be made.
        The internal time arrays are normally not changeable by the user so
        in most cases the ``replicate()`` method should be used.

        Parameters
        ----------
        format : str, optional
            Time format of the copy.

        Returns
        -------
        tm : Time object
            Copy of this object
        """
        return self._apply('copy', format=format)

    def replicate(self, format=None, copy=False, cls=None):
        """
        Return a replica of the Time object, optionally changing the format.

        If ``format`` is supplied then the time format of the returned Time
        object will be set accordingly, otherwise it will be unchanged from
        the original.

        If ``copy`` is set to `True` then a full copy of the internal time
        arrays will be made.  By default the replica will use a reference to
        the original arrays when possible to save memory.  The internal time
        arrays are normally not changeable by the user so in most cases it
        should not be necessary to set ``copy`` to `True`.

        The convenience method copy() is available in which ``copy`` is
        `True` by default.
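        A short sketch (the date is illustrative)::

            >>> t = Time('2020-01-01', scale='utc')    # doctest: +SKIP
            >>> t.replicate(format='mjd').value        # doctest: +SKIP
            58849.0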
Parameters ---------- format : str, optional Time format of the replica. copy : bool, optional Return a true copy instead of using references where possible. Returns ------- tm : Time object Replica of this object """ return self._apply('copy' if copy else 'replicate', format=format, cls=cls) def _apply(self, method, *args, format=None, cls=None, **kwargs): """Create a new time object, possibly applying a method to the arrays. Parameters ---------- method : str or callable If string, can be 'replicate' or the name of a relevant `~numpy.ndarray` method. In the former case, a new time instance with unchanged internal data is created, while in the latter the method is applied to the internal ``jd1`` and ``jd2`` arrays, as well as to possible ``location``, ``_delta_ut1_utc``, and ``_delta_tdb_tt`` arrays. If a callable, it is directly applied to the above arrays. Examples: 'copy', '__getitem__', 'reshape', `~numpy.broadcast_to`. args : tuple Any positional arguments for ``method``. kwargs : dict Any keyword arguments for ``method``. If the ``format`` keyword argument is present, this will be used as the Time format of the replica. Examples -------- Some ways this is used internally:: copy : ``_apply('copy')`` replicate : ``_apply('replicate')`` reshape : ``_apply('reshape', new_shape)`` index or slice : ``_apply('__getitem__', item)`` broadcast : ``_apply(np.broadcast, shape=new_shape)`` """ new_format = self.format if format is None else format if callable(method): apply_method = lambda array: method(array, *args, **kwargs) else: if method == 'replicate': apply_method = None else: apply_method = operator.methodcaller(method, *args, **kwargs) jd1, jd2 = self._time.jd1, self._time.jd2 if apply_method: jd1 = apply_method(jd1) jd2 = apply_method(jd2) # Get a new instance of our class and set its attributes directly. tm = super().__new__(cls or self.__class__) tm._time = TimeJD(jd1, jd2, self.scale, precision=0, in_subfmt='*', out_subfmt='*', from_jd=True) # Optional ndarray attributes. for attr in ('_delta_ut1_utc', '_delta_tdb_tt', 'location'): try: val = getattr(self, attr) except AttributeError: continue if apply_method: # Apply the method to any value arrays (though skip if there is # only an array scalar and the method would return a view, # since in that case nothing would change). if getattr(val, 'shape', ()): val = apply_method(val) elif method == 'copy' or method == 'flatten': # flatten should copy also for a single element array, but # we cannot use it directly for array scalars, since it # always returns a one-dimensional array. So, just copy. val = copy.copy(val) setattr(tm, attr, val) # Copy other 'info' attr only if it has actually been defined and the # time object is not a scalar (issue #10688). # See PR #3898 for further explanation and justification, along # with Quantity.__array_finalize__ if 'info' in self.__dict__: tm.info = self.info # Make the new internal _time object corresponding to the format # in the copy. If the format is unchanged this process is lightweight # and does not create any new arrays. 
if new_format not in tm.FORMATS: raise ValueError(f'format must be one of {list(tm.FORMATS)}') NewFormat = tm.FORMATS[new_format] tm._time = NewFormat( tm._time.jd1, tm._time.jd2, tm._time._scale, precision=self.precision, in_subfmt=NewFormat._get_allowed_subfmt(self.in_subfmt), out_subfmt=NewFormat._get_allowed_subfmt(self.out_subfmt), from_jd=True) tm._format = new_format tm.SCALES = self.SCALES return tm def __copy__(self): """ Overrides the default behavior of the `copy.copy` function in the python stdlib to behave like `Time.copy`. Does *not* make a copy of the JD arrays - only copies by reference. """ return self.replicate() def __deepcopy__(self, memo): """ Overrides the default behavior of the `copy.deepcopy` function in the python stdlib to behave like `Time.copy`. Does make a copy of the JD arrays. """ return self.copy() def _advanced_index(self, indices, axis=None, keepdims=False): """Turn argmin, argmax output into an advanced index. Argmin, argmax output contains indices along a given axis in an array shaped like the other dimensions. To use this to get values at the correct location, a list is constructed in which the other axes are indexed sequentially. For ``keepdims`` is ``True``, the net result is the same as constructing an index grid with ``np.ogrid`` and then replacing the ``axis`` item with ``indices`` with its shaped expanded at ``axis``. For ``keepdims`` is ``False``, the result is the same but with the ``axis`` dimension removed from all list entries. For ``axis`` is ``None``, this calls :func:`~numpy.unravel_index`. Parameters ---------- indices : array Output of argmin or argmax. axis : int or None axis along which argmin or argmax was used. keepdims : bool Whether to construct indices that keep or remove the axis along which argmin or argmax was used. Default: ``False``. Returns ------- advanced_index : list of arrays Suitable for use as an advanced index. """ if axis is None: return np.unravel_index(indices, self.shape) ndim = self.ndim if axis < 0: axis = axis + ndim if keepdims and indices.ndim < self.ndim: indices = np.expand_dims(indices, axis) index = [indices if i == axis else np.arange(s).reshape( (1,) * (i if keepdims or i < axis else i - 1) + (s,) + (1,) * (ndim - i - (1 if keepdims or i > axis else 2)) ) for i, s in enumerate(self.shape)] return tuple(index) def argmin(self, axis=None, out=None): """Return indices of the minimum values along the given axis. This is similar to :meth:`~numpy.ndarray.argmin`, but adapted to ensure that the full precision given by the two doubles ``jd1`` and ``jd2`` is used. See :func:`~numpy.argmin` for detailed documentation. """ # First get the minimum at normal precision. jd1, jd2 = self.jd1, self.jd2 approx = np.min(jd1 + jd2, axis, keepdims=True) # Approx is very close to the true minimum, and by subtracting it at # full precision, all numbers near 0 can be represented correctly, # so we can be sure we get the true minimum. # The below is effectively what would be done for # dt = (self - self.__class__(approx, format='jd')).jd # which translates to: # approx_jd1, approx_jd2 = day_frac(approx, 0.) # dt = (self.jd1 - approx_jd1) + (self.jd2 - approx_jd2) dt = (jd1 - approx) + jd2 return dt.argmin(axis, out) def argmax(self, axis=None, out=None): """Return indices of the maximum values along the given axis. This is similar to :meth:`~numpy.ndarray.argmax`, but adapted to ensure that the full precision given by the two doubles ``jd1`` and ``jd2`` is used. See :func:`~numpy.argmax` for detailed documentation. 
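        For example (a sketch; the split below is finer than the
        floating-point resolution of ``jd1``, so a naive ``jd1 + jd2`` sum
        could not distinguish the two entries)::

            >>> t = Time([2450000., 2450000.], [1e-10, 2e-10], format='jd')  # doctest: +SKIP
            >>> t.argmax()  # doctest: +SKIP
            1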
""" # For procedure, see comment on argmin. jd1, jd2 = self.jd1, self.jd2 approx = np.max(jd1 + jd2, axis, keepdims=True) dt = (jd1 - approx) + jd2 return dt.argmax(axis, out) def argsort(self, axis=-1): """Returns the indices that would sort the time array. This is similar to :meth:`~numpy.ndarray.argsort`, but adapted to ensure that the full precision given by the two doubles ``jd1`` and ``jd2`` is used, and that corresponding attributes are copied. Internally, it uses :func:`~numpy.lexsort`, and hence no sort method can be chosen. """ # For procedure, see comment on argmin. jd1, jd2 = self.jd1, self.jd2 approx = jd1 + jd2 remainder = (jd1 - approx) + jd2 if axis is None: return np.lexsort((remainder.ravel(), approx.ravel())) else: return np.lexsort(keys=(remainder, approx), axis=axis) def min(self, axis=None, out=None, keepdims=False): """Minimum along a given axis. This is similar to :meth:`~numpy.ndarray.min`, but adapted to ensure that the full precision given by the two doubles ``jd1`` and ``jd2`` is used, and that corresponding attributes are copied. Note that the ``out`` argument is present only for compatibility with ``np.min``; since `Time` instances are immutable, it is not possible to have an actual ``out`` to store the result in. """ if out is not None: raise ValueError("Since `Time` instances are immutable, ``out`` " "cannot be set to anything but ``None``.") return self[self._advanced_index(self.argmin(axis), axis, keepdims)] def max(self, axis=None, out=None, keepdims=False): """Maximum along a given axis. This is similar to :meth:`~numpy.ndarray.max`, but adapted to ensure that the full precision given by the two doubles ``jd1`` and ``jd2`` is used, and that corresponding attributes are copied. Note that the ``out`` argument is present only for compatibility with ``np.max``; since `Time` instances are immutable, it is not possible to have an actual ``out`` to store the result in. """ if out is not None: raise ValueError("Since `Time` instances are immutable, ``out`` " "cannot be set to anything but ``None``.") return self[self._advanced_index(self.argmax(axis), axis, keepdims)] def ptp(self, axis=None, out=None, keepdims=False): """Peak to peak (maximum - minimum) along a given axis. This is similar to :meth:`~numpy.ndarray.ptp`, but adapted to ensure that the full precision given by the two doubles ``jd1`` and ``jd2`` is used. Note that the ``out`` argument is present only for compatibility with `~numpy.ptp`; since `Time` instances are immutable, it is not possible to have an actual ``out`` to store the result in. """ if out is not None: raise ValueError("Since `Time` instances are immutable, ``out`` " "cannot be set to anything but ``None``.") return (self.max(axis, keepdims=keepdims) - self.min(axis, keepdims=keepdims)) def sort(self, axis=-1): """Return a copy sorted along the specified axis. This is similar to :meth:`~numpy.ndarray.sort`, but internally uses indexing with :func:`~numpy.lexsort` to ensure that the full precision given by the two doubles ``jd1`` and ``jd2`` is kept, and that corresponding attributes are properly sorted and copied as well. Parameters ---------- axis : int or None Axis to be sorted. If ``None``, the flattened array is sorted. By default, sort over the last axis. """ return self[self._advanced_index(self.argsort(axis), axis, keepdims=True)] @property def cache(self): """ Return the cache associated with this instance. 
""" return self._time.cache @cache.deleter def cache(self): del self._time.cache def __getattr__(self, attr): """ Get dynamic attributes to output format or do timescale conversion. """ if attr in self.SCALES and self.scale is not None: cache = self.cache['scale'] if attr not in cache: if attr == self.scale: tm = self else: tm = self.replicate() tm._set_scale(attr) if tm.shape: # Prevent future modification of cached array-like object tm.writeable = False cache[attr] = tm return cache[attr] elif attr in self.FORMATS: return self.to_value(attr, subfmt=None) elif attr in TIME_SCALES: # allowed ones done above (self.SCALES) if self.scale is None: raise ScaleValueError("Cannot convert TimeDelta with " "undefined scale to any defined scale.") else: raise ScaleValueError("Cannot convert {} with scale " "'{}' to scale '{}'" .format(self.__class__.__name__, self.scale, attr)) else: # Should raise AttributeError return self.__getattribute__(attr) @override__dir__ def __dir__(self): result = set(self.SCALES) result.update(self.FORMATS) return result def _match_shape(self, val): """ Ensure that `val` is matched to length of self. If val has length 1 then broadcast, otherwise cast to double and make sure shape matches. """ val = _make_array(val, copy=True) # be conservative and copy if val.size > 1 and val.shape != self.shape: try: # check the value can be broadcast to the shape of self. val = np.broadcast_to(val, self.shape, subok=True) except Exception: raise ValueError('Attribute shape must match or be ' 'broadcastable to that of Time object. ' 'Typically, give either a single value or ' 'one for each time.') return val def _time_comparison(self, other, op): """If other is of same class as self, compare difference in self.scale. Otherwise, return NotImplemented """ if other.__class__ is not self.__class__: try: other = self.__class__(other, scale=self.scale) except Exception: # Let other have a go. return NotImplemented if(self.scale is not None and self.scale not in other.SCALES or other.scale is not None and other.scale not in self.SCALES): # Other will also not be able to do it, so raise a TypeError # immediately, allowing us to explain why it doesn't work. raise TypeError("Cannot compare {} instances with scales " "'{}' and '{}'".format(self.__class__.__name__, self.scale, other.scale)) if self.scale is not None and other.scale is not None: other = getattr(other, self.scale) return op((self.jd1 - other.jd1) + (self.jd2 - other.jd2), 0.) def __lt__(self, other): return self._time_comparison(other, operator.lt) def __le__(self, other): return self._time_comparison(other, operator.le) def __eq__(self, other): """ If other is an incompatible object for comparison, return `False`. Otherwise, return `True` if the time difference between self and other is zero. """ return self._time_comparison(other, operator.eq) def __ne__(self, other): """ If other is an incompatible object for comparison, return `True`. Otherwise, return `False` if the time difference between self and other is zero. """ return self._time_comparison(other, operator.ne) def __gt__(self, other): return self._time_comparison(other, operator.gt) def __ge__(self, other): return self._time_comparison(other, operator.ge) class Time(TimeBase): """ Represent and manipulate times and dates for astronomy. A `Time` object is initialized with one or more times in the ``val`` argument. The input times in ``val`` must conform to the specified ``format`` and must correspond to the specified time ``scale``. 
    The optional ``val2`` time input should be supplied only for numeric
    input formats (e.g. JD) where very high precision (better than 64-bit
    precision) is required.

    The allowed values for ``format`` can be listed with::

      >>> list(Time.FORMATS)
      ['jd', 'mjd', 'decimalyear', 'unix', 'unix_tai', 'cxcsec', 'gps', 'plot_date',
       'stardate', 'datetime', 'ymdhms', 'iso', 'isot', 'yday', 'datetime64',
       'fits', 'byear', 'jyear', 'byear_str', 'jyear_str']

    See also: http://docs.astropy.org/en/stable/time/

    Parameters
    ----------
    val : sequence, ndarray, number, str, bytes, or `~astropy.time.Time` object
        Value(s) to initialize the time or times.  Bytes are decoded as ascii.
    val2 : sequence, ndarray, or number; optional
        Value(s) to initialize the time or times.  Only used for numerical
        input, to help preserve precision.
    format : str, optional
        Format of input value(s)
    scale : str, optional
        Time scale of input value(s), must be one of the following:
        ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc')
    precision : int, optional
        Digits of precision in string representation of time
    in_subfmt : str, optional
        Unix glob to select subformats for parsing input times
    out_subfmt : str, optional
        Unix glob to select subformat for outputting times
    location : `~astropy.coordinates.EarthLocation` or tuple, optional
        If given as a tuple, it should be able to initialize an
        EarthLocation instance, i.e., either contain 3 items with units of
        length for geocentric coordinates, or contain a longitude,
        latitude, and an optional height for geodetic coordinates.
        Can be a single location, or one for each input time.
        If not given, assumed to be the center of the Earth for time scale
        transformations to and from the solar-system barycenter.
    copy : bool, optional
        Make a copy of the input values
    """

    SCALES = TIME_SCALES
    """List of time scales"""

    FORMATS = TIME_FORMATS
    """Dict of time formats"""

    def __new__(cls, val, val2=None, format=None, scale=None,
                precision=None, in_subfmt=None, out_subfmt=None,
                location=None, copy=False):

        if isinstance(val, Time):
            self = val.replicate(format=format, copy=copy, cls=cls)
        else:
            self = super().__new__(cls)

        return self

    def __init__(self, val, val2=None, format=None, scale=None,
                 precision=None, in_subfmt=None, out_subfmt=None,
                 location=None, copy=False):

        if location is not None:
            from astropy.coordinates import EarthLocation
            if isinstance(location, EarthLocation):
                self.location = location
            else:
                self.location = EarthLocation(*location)
            if self.location.size == 1:
                self.location = self.location.squeeze()
        else:
            if not hasattr(self, 'location'):
                self.location = None

        if isinstance(val, Time):
            # Update _time formatting parameters if explicitly specified
            if precision is not None:
                self._time.precision = precision
            if in_subfmt is not None:
                self._time.in_subfmt = in_subfmt
            if out_subfmt is not None:
                self._time.out_subfmt = out_subfmt
            self.SCALES = TIME_TYPES[self.scale]
            if scale is not None:
                self._set_scale(scale)
        else:
            self._init_from_vals(val, val2, format, scale, copy,
                                 precision, in_subfmt, out_subfmt)
            self.SCALES = TIME_TYPES[self.scale]

        if self.location is not None and (self.location.size > 1
                                          and self.location.shape != self.shape):
            try:
                # check the location can be broadcast to self's shape.
                self.location = np.broadcast_to(self.location, self.shape,
                                                subok=True)
            except Exception as err:
                raise ValueError('The location with shape {} cannot be '
                                 'broadcast against time with shape {}. '
                                 'Typically, either give a single location or '
                                 'one for each time.'
.format(self.location.shape, self.shape)) from err def _make_value_equivalent(self, item, value): """Coerce setitem value into an equivalent Time object""" # If there is a vector location then broadcast to the Time shape # and then select with ``item`` if self.location is not None and self.location.shape: self_location = np.broadcast_to(self.location, self.shape, subok=True)[item] else: self_location = self.location if isinstance(value, Time): # Make sure locations are compatible. Location can be either None or # a Location object. if self_location is None and value.location is None: match = True elif ((self_location is None and value.location is not None) or (self_location is not None and value.location is None)): match = False else: match = np.all(self_location == value.location) if not match: raise ValueError('cannot set to Time with different location: ' 'expected location={} and ' 'got location={}' .format(self_location, value.location)) else: try: value = self.__class__(value, scale=self.scale, location=self_location) except Exception: try: value = self.__class__(value, scale=self.scale, format=self.format, location=self_location) except Exception as err: raise ValueError('cannot convert value to a compatible Time object: {}' .format(err)) return value @classmethod def now(cls): """ Creates a new object corresponding to the instant in time this method is called. .. note:: "Now" is determined using the `~datetime.datetime.utcnow` function, so its accuracy and precision is determined by that function. Generally that means it is set by the accuracy of your system clock. Returns ------- nowtime : :class:`~astropy.time.Time` A new `Time` object (or a subclass of `Time` if this is called from such a subclass) at the current time. """ # call `utcnow` immediately to be sure it's ASAP dtnow = datetime.utcnow() return cls(val=dtnow, format='datetime', scale='utc') info = TimeInfo() @classmethod def strptime(cls, time_string, format_string, **kwargs): """ Parse a string to a Time according to a format specification. See `time.strptime` documentation for format specification. >>> Time.strptime('2012-Jun-30 23:59:60', '%Y-%b-%d %H:%M:%S') <Time object: scale='utc' format='isot' value=2012-06-30T23:59:60.000> Parameters ---------- time_string : str, sequence, or ndarray Objects containing time data of type string format_string : str String specifying format of time_string. kwargs : dict Any keyword arguments for ``Time``. If the ``format`` keyword argument is present, this will be used as the Time format. Returns ------- time_obj : `~astropy.time.Time` A new `~astropy.time.Time` object corresponding to the input ``time_string``. """ time_array = np.asarray(time_string) if time_array.dtype.kind not in ('U', 'S'): err = "Expected type is string, a bytes-like object or a sequence"\ " of these. Got dtype '{}'".format(time_array.dtype.kind) raise TypeError(err) to_string = (str if time_array.dtype.kind == 'U' else lambda x: str(x.item(), encoding='ascii')) iterator = np.nditer([time_array, None], op_dtypes=[time_array.dtype, 'U30']) for time, formatted in iterator: tt, fraction = _strptime._strptime(to_string(time), format_string) time_tuple = tt[:6] + (fraction,) formatted[...] 
= '{:04}-{:02}-{:02}T{:02}:{:02}:{:02}.{:06}'\
                .format(*time_tuple)

        format = kwargs.pop('format', None)
        out = cls(*iterator.operands[1:], format='isot', **kwargs)
        if format is not None:
            out.format = format

        return out

    def strftime(self, format_spec):
        """
        Convert Time to a string or a numpy.array of strings according to a
        format specification.  See `time.strftime` documentation for the
        format codes.

        Parameters
        ----------
        format_spec : str
            Format definition of return string.

        Returns
        -------
        formatted : str or numpy.array
            String or numpy.array of strings formatted according to the given
            format string.

        """
        formatted_strings = []
        for sk in self.replicate('iso')._time.str_kwargs():
            date_tuple = date(sk['year'], sk['mon'], sk['day']).timetuple()
            datetime_tuple = (sk['year'], sk['mon'], sk['day'],
                              sk['hour'], sk['min'], sk['sec'],
                              date_tuple[6], date_tuple[7], -1)
            fmtd_str = format_spec
            if '%f' in fmtd_str:
                fmtd_str = fmtd_str.replace('%f', '{frac:0{precision}}'.format(
                    frac=sk['fracsec'], precision=self.precision))
            fmtd_str = strftime(fmtd_str, datetime_tuple)
            formatted_strings.append(fmtd_str)

        if self.isscalar:
            return formatted_strings[0]
        else:
            return np.array(formatted_strings).reshape(self.shape)

    def light_travel_time(self, skycoord, kind='barycentric',
                          location=None, ephemeris=None):
        """Light travel time correction to the barycentre or heliocentre.

        The frame transformations used to calculate the location of the solar
        system barycentre and the heliocentre rely on the erfa routine epv00,
        which is consistent with the JPL DE405 ephemeris to an accuracy of
        11.2 km, corresponding to a light travel time of 4 microseconds.

        The routine assumes the source(s) are at large distance, i.e., neglects
        finite-distance effects.

        Parameters
        ----------
        skycoord : `~astropy.coordinates.SkyCoord`
            The sky location to calculate the correction for.
        kind : str, optional
            ``'barycentric'`` (default) or ``'heliocentric'``
        location : `~astropy.coordinates.EarthLocation`, optional
            The location of the observatory to calculate the correction for.
            If no location is given, the ``location`` attribute of the Time
            object is used
        ephemeris : str, optional
            Solar system ephemeris to use (e.g., 'builtin', 'jpl'). By default,
            use the one set with ``astropy.coordinates.solar_system_ephemeris.set``.
            For more information, see `~astropy.coordinates.solar_system_ephemeris`.

        Returns
        -------
        time_offset : `~astropy.time.TimeDelta`
            The time offset between the barycentre or heliocentre and Earth,
            in TDB seconds.  Should be added to the original time to get the
            time in the solar-system barycentre or the heliocentre.  The time
            conversion to BJD will then include the relativistic correction as
            well.
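
        Examples
        --------
        A sketch of typical usage (the observatory site and target below are
        illustrative placeholders, not taken from the surrounding source; any
        `~astropy.coordinates.EarthLocation` and
        `~astropy.coordinates.SkyCoord` will do)::

            >>> import astropy.units as u
            >>> from astropy.time import Time
            >>> from astropy.coordinates import SkyCoord, EarthLocation
            >>> loc = EarthLocation.of_site('greenwich')  # doctest: +SKIP
            >>> t = Time('2020-01-01T00:00:00', scale='utc', location=loc)  # doctest: +SKIP
            >>> target = SkyCoord(10. * u.deg, 20. * u.deg)  # doctest: +SKIP
            >>> ltt_bary = t.light_travel_time(target)  # doctest: +SKIP
            >>> t_bary = t.tdb + ltt_bary  # barycentric (TDB) time  # doctest: +SKIP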
""" if kind.lower() not in ('barycentric', 'heliocentric'): raise ValueError("'kind' parameter must be one of 'heliocentric' " "or 'barycentric'") if location is None: if self.location is None: raise ValueError('An EarthLocation needs to be set or passed ' 'in to calculate bary- or heliocentric ' 'corrections') location = self.location from astropy.coordinates import (UnitSphericalRepresentation, CartesianRepresentation, HCRS, ICRS, GCRS, solar_system_ephemeris) # ensure sky location is ICRS compatible if not skycoord.is_transformable_to(ICRS()): raise ValueError("Given skycoord is not transformable to the ICRS") # get location of observatory in ITRS coordinates at this Time try: itrs = location.get_itrs(obstime=self) except Exception: raise ValueError("Supplied location does not have a valid `get_itrs` method") with solar_system_ephemeris.set(ephemeris): if kind.lower() == 'heliocentric': # convert to heliocentric coordinates, aligned with ICRS cpos = itrs.transform_to(HCRS(obstime=self)).cartesian.xyz else: # first we need to convert to GCRS coordinates with the correct # obstime, since ICRS coordinates have no frame time gcrs_coo = itrs.transform_to(GCRS(obstime=self)) # convert to barycentric (BCRS) coordinates, aligned with ICRS cpos = gcrs_coo.transform_to(ICRS()).cartesian.xyz # get unit ICRS vector to star spos = (skycoord.icrs.represent_as(UnitSphericalRepresentation). represent_as(CartesianRepresentation).xyz) # Move X,Y,Z to last dimension, to enable possible broadcasting below. cpos = np.rollaxis(cpos, 0, cpos.ndim) spos = np.rollaxis(spos, 0, spos.ndim) # calculate light travel time correction tcor_val = (spos * cpos).sum(axis=-1) / const.c return TimeDelta(tcor_val, scale='tdb') def earth_rotation_angle(self, longitude=None): """Calculate local Earth rotation angle. Parameters ---------- longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional The longitude on the Earth at which to compute the Earth rotation angle (taken from a location as needed). If `None` (default), taken from the ``location`` attribute of the Time instance. If the special string 'tio', the result will be relative to the Terrestrial Intermediate Origin (TIO) (i.e., the output of `~erfa.era00`). Returns ------- `~astropy.coordinates.Longitude` Local Earth rotation angle with units of hourangle. See Also -------- astropy.time.Time.sidereal_time References ---------- IAU 2006 NFA Glossary (currently located at: https://syrte.obspm.fr/iauWGnfa/NFA_Glossary.html) Notes ----- The difference between apparent sidereal time and Earth rotation angle is the equation of the origins, which is the angle between the Celestial Intermediate Origin (CIO) and the equinox. Applying apparent sidereal time to the hour angle yields the true apparent Right Ascension with respect to the equinox, while applying the Earth rotation angle yields the intermediate (CIRS) Right Ascension with respect to the CIO. The result includes the TIO locator (s'), which positions the Terrestrial Intermediate Origin on the equator of the Celestial Intermediate Pole (CIP) and is rigorously corrected for polar motion. (except when ``longitude='tio'``). """ # noqa if isinstance(longitude, str) and longitude == 'tio': longitude = 0 include_tio = False else: include_tio = True return self._sid_time_or_earth_rot_ang(longitude=longitude, function=erfa.era00, scales=('ut1',), include_tio=include_tio) def sidereal_time(self, kind, longitude=None, model=None): """Calculate sidereal time. 
Parameters ---------- kind : str ``'mean'`` or ``'apparent'``, i.e., accounting for precession only, or also for nutation. longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional The longitude on the Earth at which to compute the Earth rotation angle (taken from a location as needed). If `None` (default), taken from the ``location`` attribute of the Time instance. If the special string 'greenwich' or 'tio', the result will be relative to longitude 0 for models before 2000, and relative to the Terrestrial Intermediate Origin (TIO) for later ones (i.e., the output of the relevant ERFA function that calculates greenwich sidereal time). model : str or None; optional Precession (and nutation) model to use. The available ones are: - {0}: {1} - {2}: {3} If `None` (default), the last (most recent) one from the appropriate list above is used. Returns ------- `~astropy.coordinates.Longitude` Local sidereal time, with units of hourangle. See Also -------- astropy.time.Time.earth_rotation_angle References ---------- IAU 2006 NFA Glossary (currently located at: https://syrte.obspm.fr/iauWGnfa/NFA_Glossary.html) Notes ----- The difference between apparent sidereal time and Earth rotation angle is the equation of the origins, which is the angle between the Celestial Intermediate Origin (CIO) and the equinox. Applying apparent sidereal time to the hour angle yields the true apparent Right Ascension with respect to the equinox, while applying the Earth rotation angle yields the intermediate (CIRS) Right Ascension with respect to the CIO. For the IAU precession models from 2000 onwards, the result includes the TIO locator (s'), which positions the Terrestrial Intermediate Origin on the equator of the Celestial Intermediate Pole (CIP) and is rigorously corrected for polar motion (except when ``longitude='tio'`` or ``'greenwich'``). """ # noqa (docstring is formatted below) if kind.lower() not in SIDEREAL_TIME_MODELS.keys(): raise ValueError('The kind of sidereal time has to be {}'.format( ' or '.join(sorted(SIDEREAL_TIME_MODELS.keys())))) available_models = SIDEREAL_TIME_MODELS[kind.lower()] if model is None: model = sorted(available_models.keys())[-1] elif model.upper() not in available_models: raise ValueError( 'Model {} not implemented for {} sidereal time; ' 'available models are {}' .format(model, kind, sorted(available_models.keys()))) model_kwargs = available_models[model.upper()] if isinstance(longitude, str) and longitude in ('tio', 'greenwich'): longitude = 0 model_kwargs = model_kwargs.copy() model_kwargs['include_tio'] = False return self._sid_time_or_earth_rot_ang(longitude=longitude, **model_kwargs) if isinstance(sidereal_time.__doc__, str): sidereal_time.__doc__ = sidereal_time.__doc__.format( 'apparent', sorted(SIDEREAL_TIME_MODELS['apparent'].keys()), 'mean', sorted(SIDEREAL_TIME_MODELS['mean'].keys())) def _sid_time_or_earth_rot_ang(self, longitude, function, scales, include_tio=True): """Calculate a local sidereal time or Earth rotation angle. Parameters ---------- longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional The longitude on the Earth at which to compute the Earth rotation angle (taken from a location as needed). If `None` (default), taken from the ``location`` attribute of the Time instance. function : callable The ERFA function to use. scales : tuple of str The time scales that the function requires on input. 
        include_tio : bool, optional
            Whether to include the TIO locator corrected for polar motion.
            Should be `False` for pre-2000 IAU models.  Default: `True`.

        Returns
        -------
        `~astropy.coordinates.Longitude`
            Local sidereal time or Earth rotation angle, with units of
            hourangle.

        """  # noqa
        from astropy.coordinates import Longitude, EarthLocation
        from astropy.coordinates.builtin_frames.utils import get_polar_motion
        from astropy.coordinates.matrix_utilities import rotation_matrix

        if longitude is None:
            if self.location is None:
                raise ValueError('No longitude is given but the location for '
                                 'the Time object is not set.')
            longitude = self.location.lon
        elif isinstance(longitude, EarthLocation):
            longitude = longitude.lon
        else:
            # Sanity check on input; default unit is degree.
            longitude = Longitude(longitude, u.degree, copy=False)

        theta = self._call_erfa(function, scales)

        if include_tio:
            # TODO: this duplicates part of coordinates.erfa_astrom.ErfaAstrom.apio;
            # maybe possible to factor out to one or the other.
            sp = self._call_erfa(erfa.sp00, ('tt',))
            xp, yp = get_polar_motion(self)
            # Form the rotation matrix, CIRS to apparent [HA,Dec].
            r = (rotation_matrix(longitude, 'z')
                 @ rotation_matrix(-yp, 'x', unit=u.radian)
                 @ rotation_matrix(-xp, 'y', unit=u.radian)
                 @ rotation_matrix(theta + sp, 'z', unit=u.radian))
            # Solve for angle.
            angle = np.arctan2(r[..., 0, 1], r[..., 0, 0]) << u.radian

        else:
            angle = longitude + (theta << u.radian)

        return Longitude(angle, u.hourangle)

    def _call_erfa(self, function, scales):
        # TODO: allow erfa functions to be used on Time with __array_ufunc__.
        erfa_parameters = [getattr(getattr(self, scale)._time, jd_part)
                           for scale in scales
                           for jd_part in ('jd1', 'jd2_filled')]

        result = function(*erfa_parameters)

        if self.masked:
            result[self.mask] = np.nan

        return result

    def get_delta_ut1_utc(self, iers_table=None, return_status=False):
        """Find UT1 - UTC differences by interpolating in IERS Table.

        Parameters
        ----------
        iers_table : `~astropy.utils.iers.IERS`, optional
            Table containing UT1-UTC differences from IERS Bulletins A
            and/or B.  Default: `~astropy.utils.iers.earth_orientation_table`
            (which in turn defaults to the combined version provided by
            `~astropy.utils.iers.IERS_Auto`).
        return_status : bool
            Whether to return status values.  If `False` (default), iers
            raises `IndexError` if any time is out of the range covered by
            the IERS table.

        Returns
        -------
        ut1_utc : float or float array
            UT1-UTC, interpolated in IERS Table
        status : int or int array
            Status values (if ``return_status=True``)::

              ``astropy.utils.iers.FROM_IERS_B``
              ``astropy.utils.iers.FROM_IERS_A``
              ``astropy.utils.iers.FROM_IERS_A_PREDICTION``
              ``astropy.utils.iers.TIME_BEFORE_IERS_RANGE``
              ``astropy.utils.iers.TIME_BEYOND_IERS_RANGE``

        Notes
        -----
        In normal usage, UT1-UTC differences are calculated automatically
        on the first instance ut1 is needed.

        Examples
        --------
        To check in code whether any times are before the IERS table range::

            >>> from astropy.utils.iers import TIME_BEFORE_IERS_RANGE
            >>> t = Time(['1961-01-01', '2000-01-01'], scale='utc')
            >>> delta, status = t.get_delta_ut1_utc(return_status=True)  # doctest: +REMOTE_DATA
            >>> status == TIME_BEFORE_IERS_RANGE  # doctest: +REMOTE_DATA
            array([ True, False]...)
        """
        if iers_table is None:
            from astropy.utils.iers import earth_orientation_table
            iers_table = earth_orientation_table.get()

        return iers_table.ut1_utc(self.utc, return_status=return_status)

    # Property for ERFA DUT arg = UT1 - UTC
    def _get_delta_ut1_utc(self, jd1=None, jd2=None):
        """
        Get ERFA DUT arg = UT1 - UTC.
        This getter takes optional jd1 and jd2 args because it gets called
        that way when converting time scales.  If delta_ut1_utc is not yet
        set, this will interpolate them from the IERS table.
        """
        # Sec. 4.3.1: the arg DUT is the quantity delta_UT1 = UT1 - UTC in
        # seconds. It is obtained from tables published by the IERS.
        if not hasattr(self, '_delta_ut1_utc'):
            from astropy.utils.iers import earth_orientation_table
            iers_table = earth_orientation_table.get()

            # jd1, jd2 are normally set (see above), except if delta_ut1_utc
            # is accessed directly; ensure we behave as expected for that case
            if jd1 is None:
                self_utc = self.utc
                jd1, jd2 = self_utc._time.jd1, self_utc._time.jd2_filled
                scale = 'utc'
            else:
                scale = self.scale

            # interpolate UT1-UTC in IERS table
            delta = iers_table.ut1_utc(jd1, jd2)

            # if we interpolated using UT1 jds, we may be off by one
            # second near leap seconds (and very slightly off elsewhere)
            if scale == 'ut1':
                # calculate UTC using the offset we got; the ERFA routine
                # is tolerant of leap seconds, so will do this right
                jd1_utc, jd2_utc = erfa.ut1utc(jd1, jd2, delta.to_value(u.s))
                # calculate a better estimate using the nearly correct UTC
                delta = iers_table.ut1_utc(jd1_utc, jd2_utc)

            self._set_delta_ut1_utc(delta)

        return self._delta_ut1_utc

    def _set_delta_ut1_utc(self, val):
        del self.cache
        if hasattr(val, 'to'):  # Matches Quantity but also TimeDelta.
            val = val.to(u.second).value
        val = self._match_shape(val)
        self._delta_ut1_utc = val

    # Note can't use @property because _get_delta_ut1_utc is explicitly
    # called with the optional jd1 and jd2 args.
    delta_ut1_utc = property(_get_delta_ut1_utc, _set_delta_ut1_utc)
    """UT1 - UTC time scale offset"""

    # Property for ERFA DTR arg = TDB - TT
    def _get_delta_tdb_tt(self, jd1=None, jd2=None):
        if not hasattr(self, '_delta_tdb_tt'):
            # If jd1 and jd2 are not provided (which is the case for property
            # attribute access) then require that the time scale is TT or TDB.
            # Otherwise the computations here are not correct.
            if jd1 is None or jd2 is None:
                if self.scale not in ('tt', 'tdb'):
                    raise ValueError('Accessing the delta_tdb_tt attribute '
                                     'is only possible for TT or TDB time '
                                     'scales')
                else:
                    jd1 = self._time.jd1
                    jd2 = self._time.jd2_filled

            # First go from the current input time (which is either
            # TDB or TT) to an approximate UT1.  Since TT and TDB are
            # pretty close (few msec?), assume TT.  Similarly, since the
            # UT1 terms are very small, use UTC instead of UT1.
            njd1, njd2 = erfa.tttai(jd1, jd2)
            njd1, njd2 = erfa.taiutc(njd1, njd2)
            # subtract 0.5, so UT is fraction of the day from midnight
            ut = day_frac(njd1 - 0.5, njd2)[1]

            if self.location is None:
                # Assume geocentric.
                self._delta_tdb_tt = erfa.dtdb(jd1, jd2, ut, 0., 0., 0.)
            else:
                location = self.location
                # Geodetic params needed for d_tdb_tt()
                lon = location.lon
                rxy = np.hypot(location.x, location.y)
                z = location.z
                self._delta_tdb_tt = erfa.dtdb(
                    jd1, jd2, ut, lon.to_value(u.radian),
                    rxy.to_value(u.km), z.to_value(u.km))

        return self._delta_tdb_tt

    def _set_delta_tdb_tt(self, val):
        del self.cache
        if hasattr(val, 'to'):  # Matches Quantity but also TimeDelta.
            val = val.to(u.second).value
        val = self._match_shape(val)
        self._delta_tdb_tt = val

    # Note can't use @property because _get_delta_tdb_tt is explicitly
    # called with the optional jd1 and jd2 args.
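    # Illustrative usage (not from the original source): the offset is
    # computed lazily on first access, and can be overridden with any
    # time-like Quantity, e.g.
    #     t = Time('2010-01-01', scale='tdb')
    #     t.delta_tdb_tt            # computed via erfa.dtdb on first access
    #     t.delta_tdb_tt = 0. * u.s  # force a zero TDB - TT offset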
delta_tdb_tt = property(_get_delta_tdb_tt, _set_delta_tdb_tt) """TDB - TT time scale offset""" def __sub__(self, other): # T - Tdelta = T # T - T = Tdelta other_is_delta = not isinstance(other, Time) if other_is_delta: # T - Tdelta # Check other is really a TimeDelta or something that can initialize. if not isinstance(other, TimeDelta): try: other = TimeDelta(other) except Exception: return NotImplemented # we need a constant scale to calculate, which is guaranteed for # TimeDelta, but not for Time (which can be UTC) out = self.replicate() if self.scale in other.SCALES: if other.scale not in (out.scale, None): other = getattr(other, out.scale) else: if other.scale is None: out._set_scale('tai') else: if self.scale not in TIME_TYPES[other.scale]: raise TypeError("Cannot subtract Time and TimeDelta instances " "with scales '{}' and '{}'" .format(self.scale, other.scale)) out._set_scale(other.scale) # remove attributes that are invalidated by changing time for attr in ('_delta_ut1_utc', '_delta_tdb_tt'): if hasattr(out, attr): delattr(out, attr) else: # T - T # the scales should be compatible (e.g., cannot convert TDB to LOCAL) if other.scale not in self.SCALES: raise TypeError("Cannot subtract Time instances " "with scales '{}' and '{}'" .format(self.scale, other.scale)) self_time = (self._time if self.scale in TIME_DELTA_SCALES else self.tai._time) # set up TimeDelta, subtraction to be done shortly out = TimeDelta(self_time.jd1, self_time.jd2, format='jd', scale=self_time.scale) if other.scale != out.scale: other = getattr(other, out.scale) jd1 = out._time.jd1 - other._time.jd1 jd2 = out._time.jd2 - other._time.jd2 out._time.jd1, out._time.jd2 = day_frac(jd1, jd2) if other_is_delta: # Go back to left-side scale if needed out._set_scale(self.scale) return out def __add__(self, other): # T + Tdelta = T # T + T = error if isinstance(other, Time): raise OperandTypeError(self, other, '+') # Check other is really a TimeDelta or something that can initialize. if not isinstance(other, TimeDelta): try: other = TimeDelta(other) except Exception: return NotImplemented # ideally, we calculate in the scale of the Time item, since that is # what we want the output in, but this may not be possible, since # TimeDelta cannot be converted arbitrarily out = self.replicate() if self.scale in other.SCALES: if other.scale not in (out.scale, None): other = getattr(other, out.scale) else: if other.scale is None: out._set_scale('tai') else: if self.scale not in TIME_TYPES[other.scale]: raise TypeError("Cannot add Time and TimeDelta instances " "with scales '{}' and '{}'" .format(self.scale, other.scale)) out._set_scale(other.scale) # remove attributes that are invalidated by changing time for attr in ('_delta_ut1_utc', '_delta_tdb_tt'): if hasattr(out, attr): delattr(out, attr) jd1 = out._time.jd1 + other._time.jd1 jd2 = out._time.jd2 + other._time.jd2 out._time.jd1, out._time.jd2 = day_frac(jd1, jd2) # Go back to left-side scale if needed out._set_scale(self.scale) return out # Reverse addition is possible: <something-Tdelta-ish> + T # but there is no case of <something> - T, so no __rsub__. def __radd__(self, other): return self.__add__(other) def __array_function__(self, function, types, args, kwargs): """ Wrap numpy functions. Parameters ---------- function : callable Numpy function to wrap types : iterable of classes Classes that provide an ``__array_function__`` override. Can in principle be used to interact with other classes. 
Below, mostly passed on to `~numpy.ndarray`, which can only interact with subclasses. args : tuple Positional arguments provided in the function call. kwargs : dict Keyword arguments provided in the function call. """ if function in CUSTOM_FUNCTIONS: f = CUSTOM_FUNCTIONS[function] return f(*args, **kwargs) elif function in UNSUPPORTED_FUNCTIONS: return NotImplemented else: return super().__array_function__(function, types, args, kwargs) def to_datetime(self, timezone=None): # TODO: this could likely go through to_value, as long as that # had an **kwargs part that was just passed on to _time. tm = self.replicate(format='datetime') return tm._shaped_like_input(tm._time.to_value(timezone)) to_datetime.__doc__ = TimeDatetime.to_value.__doc__ class TimeDeltaMissingUnitWarning(AstropyDeprecationWarning): """Warning for missing unit or format in TimeDelta""" pass class TimeDelta(TimeBase): """ Represent the time difference between two times. A TimeDelta object is initialized with one or more times in the ``val`` argument. The input times in ``val`` must conform to the specified ``format``. The optional ``val2`` time input should be supplied only for numeric input formats (e.g. JD) where very high precision (better than 64-bit precision) is required. The allowed values for ``format`` can be listed with:: >>> list(TimeDelta.FORMATS) ['sec', 'jd', 'datetime'] Note that for time differences, the scale can be among three groups: geocentric ('tai', 'tt', 'tcg'), barycentric ('tcb', 'tdb'), and rotational ('ut1'). Within each of these, the scales for time differences are the same. Conversion between geocentric and barycentric is possible, as there is only a scale factor change, but one cannot convert to or from 'ut1', as this requires knowledge of the actual times, not just their difference. For a similar reason, 'utc' is not a valid scale for a time difference: a UTC day is not always 86400 seconds. See also: - https://docs.astropy.org/en/stable/time/ - https://docs.astropy.org/en/stable/time/index.html#time-deltas Parameters ---------- val : sequence, ndarray, number, `~astropy.units.Quantity` or `~astropy.time.TimeDelta` object Value(s) to initialize the time difference(s). Any quantities will be converted appropriately (with care taken to avoid rounding errors for regular time units). val2 : sequence, ndarray, number, or `~astropy.units.Quantity`; optional Additional values, as needed to preserve precision. format : str, optional Format of input value(s). For numerical inputs without units, "jd" is assumed and values are interpreted as days. A deprecation warning is raised in this case. To avoid the warning, either specify the format or add units to the input values. scale : str, optional Time scale of input value(s), must be one of the following values: ('tdb', 'tt', 'ut1', 'tcg', 'tcb', 'tai'). If not given (or ``None``), the scale is arbitrary; when added or subtracted from a ``Time`` instance, it will be used without conversion. 
copy : bool, optional Make a copy of the input values """ SCALES = TIME_DELTA_SCALES """List of time delta scales.""" FORMATS = TIME_DELTA_FORMATS """Dict of time delta formats.""" info = TimeDeltaInfo() def __new__(cls, val, val2=None, format=None, scale=None, precision=None, in_subfmt=None, out_subfmt=None, location=None, copy=False): if isinstance(val, TimeDelta): self = val.replicate(format=format, copy=copy, cls=cls) else: self = super().__new__(cls) return self def __init__(self, val, val2=None, format=None, scale=None, copy=False): if isinstance(val, TimeDelta): if scale is not None: self._set_scale(scale) else: format = format or self._get_format(val) self._init_from_vals(val, val2, format, scale, copy) if scale is not None: self.SCALES = TIME_DELTA_TYPES[scale] @staticmethod def _get_format(val): if isinstance(val, timedelta): return 'datetime' if getattr(val, 'unit', None) is None: warn('Numerical value without unit or explicit format passed to' ' TimeDelta, assuming days', TimeDeltaMissingUnitWarning) return 'jd' def replicate(self, *args, **kwargs): out = super().replicate(*args, **kwargs) out.SCALES = self.SCALES return out def to_datetime(self): """ Convert to ``datetime.timedelta`` object. """ tm = self.replicate(format='datetime') return tm._shaped_like_input(tm._time.value) def _set_scale(self, scale): """ This is the key routine that actually does time scale conversions. This is not public and not connected to the read-only scale property. """ if scale == self.scale: return if scale not in self.SCALES: raise ValueError("Scale {!r} is not in the allowed scales {}" .format(scale, sorted(self.SCALES))) # For TimeDelta, there can only be a change in scale factor, # which is written as time2 - time1 = scale_offset * time1 scale_offset = SCALE_OFFSETS[(self.scale, scale)] if scale_offset is None: self._time.scale = scale else: jd1, jd2 = self._time.jd1, self._time.jd2 offset1, offset2 = day_frac(jd1, jd2, factor=scale_offset) self._time = self.FORMATS[self.format]( jd1 + offset1, jd2 + offset2, scale, self.precision, self.in_subfmt, self.out_subfmt, from_jd=True) def _add_sub(self, other, op): """Perform common elements of addition / subtraction for two delta times""" # If not a TimeDelta then see if it can be turned into a TimeDelta. if not isinstance(other, TimeDelta): try: other = TimeDelta(other) except Exception: return NotImplemented # the scales should be compatible (e.g., cannot convert TDB to TAI) if(self.scale is not None and self.scale not in other.SCALES or other.scale is not None and other.scale not in self.SCALES): raise TypeError("Cannot add TimeDelta instances with scales " "'{}' and '{}'".format(self.scale, other.scale)) # adjust the scale of other if the scale of self is set (or no scales) if self.scale is not None or other.scale is None: out = self.replicate() if other.scale is not None: other = getattr(other, self.scale) else: out = other.replicate() jd1 = op(self._time.jd1, other._time.jd1) jd2 = op(self._time.jd2, other._time.jd2) out._time.jd1, out._time.jd2 = day_frac(jd1, jd2) return out def __add__(self, other): # If other is a Time then use Time.__add__ to do the calculation. 
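        # Illustrative example (not in the original source):
        #     TimeDelta(1., format='jd') + Time('2000-01-01')
        # delegates to Time.__add__ and gives Time('2000-01-02 00:00:00'),
        # since Time + TimeDelta is well defined while the reverse is not.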
        if isinstance(other, Time):
            return other.__add__(self)

        return self._add_sub(other, operator.add)

    def __sub__(self, other):
        # TimeDelta - Time is an error
        if isinstance(other, Time):
            raise OperandTypeError(self, other, '-')

        return self._add_sub(other, operator.sub)

    def __radd__(self, other):
        return self.__add__(other)

    def __rsub__(self, other):
        out = self.__sub__(other)
        return -out

    def __neg__(self):
        """Negation of a `TimeDelta` object."""
        new = self.copy()
        new._time.jd1 = -self._time.jd1
        new._time.jd2 = -self._time.jd2
        return new

    def __abs__(self):
        """Absolute value of a `TimeDelta` object."""
        jd1, jd2 = self._time.jd1, self._time.jd2
        negative = jd1 + jd2 < 0
        new = self.copy()
        new._time.jd1 = np.where(negative, -jd1, jd1)
        new._time.jd2 = np.where(negative, -jd2, jd2)
        return new

    def __mul__(self, other):
        """Multiplication of `TimeDelta` objects by numbers/arrays."""
        # Check needed since otherwise the self.jd1 * other multiplication
        # would enter here again (via __rmul__)
        if isinstance(other, Time):
            raise OperandTypeError(self, other, '*')
        elif ((isinstance(other, u.UnitBase)
               and other == u.dimensionless_unscaled)
              or (isinstance(other, str) and other == '')):
            return self.copy()

        # If other is something consistent with a dimensionless quantity
        # (could just be a float or an array), then we can just multiply in.
        try:
            other = u.Quantity(other, u.dimensionless_unscaled, copy=False)
        except Exception:
            # If not consistent with a dimensionless quantity, try downgrading
            # self to a quantity and see if things work.
            try:
                return self.to(u.day) * other
            except Exception:
                # The various ways we could multiply all failed;
                # returning NotImplemented to give other a final chance.
                return NotImplemented

        jd1, jd2 = day_frac(self.jd1, self.jd2, factor=other.value)
        out = TimeDelta(jd1, jd2, format='jd', scale=self.scale)

        if self.format != 'jd':
            out = out.replicate(format=self.format)
        return out

    def __rmul__(self, other):
        """Multiplication of numbers/arrays with `TimeDelta` objects."""
        return self.__mul__(other)

    def __truediv__(self, other):
        """Division of `TimeDelta` objects by numbers/arrays."""
        # Cannot do __mul__(1./other) as that loses precision
        if ((isinstance(other, u.UnitBase)
             and other == u.dimensionless_unscaled)
                or (isinstance(other, str) and other == '')):
            return self.copy()

        # If other is something consistent with a dimensionless quantity
        # (could just be a float or an array), then we can just divide in.
        try:
            other = u.Quantity(other, u.dimensionless_unscaled, copy=False)
        except Exception:
            # If not consistent with a dimensionless quantity, try downgrading
            # self to a quantity and see if things work.
            try:
                return self.to(u.day) / other
            except Exception:
                # The various ways we could divide all failed;
                # returning NotImplemented to give other a final chance.
                return NotImplemented

        jd1, jd2 = day_frac(self.jd1, self.jd2, divisor=other.value)
        out = TimeDelta(jd1, jd2, format='jd', scale=self.scale)

        if self.format != 'jd':
            out = out.replicate(format=self.format)

        return out

    def __rtruediv__(self, other):
        """Division by `TimeDelta` objects of numbers/arrays."""
        # Here, we do not have to worry about returning NotImplemented,
        # since other has already had a chance to look at us.
        return other / self.to(u.day)

    def to(self, unit, equivalencies=[]):
        """
        Convert to a quantity in the specified unit.

        Parameters
        ----------
        unit : unit-like
            The unit to convert to.
        equivalencies : list of tuple
            A list of equivalence pairs to try if the units are not directly
            convertible (see :ref:`astropy:unit_equivalencies`).
            If `None`, no equivalencies will be applied at all, not even any
            set globally or within a context.

        Returns
        -------
        quantity : `~astropy.units.Quantity`
            The quantity in the units specified.

        See also
        --------
        to_value : get the numerical value in a given unit.
        """
        return u.Quantity(self._time.jd1 + self._time.jd2, u.day).to(
            unit, equivalencies=equivalencies)

    def to_value(self, *args, **kwargs):
        """Get time delta values expressed in specified output format or unit.

        This method is flexible and handles both conversion to a specified
        ``TimeDelta`` format / sub-format AND conversion to a specified unit.
        If positional argument(s) are provided then the first one is checked
        to see if it is a valid ``TimeDelta`` format, and next it is checked
        to see if it is a valid unit or unit string.

        To convert to a ``TimeDelta`` format and optional sub-format the
        options are::

          tm = TimeDelta(1.0 * u.s)
          tm.to_value('jd')  # equivalent of tm.jd
          tm.to_value('jd', 'decimal')  # convert to 'jd' as a Decimal object
          tm.to_value('jd', subfmt='decimal')
          tm.to_value(format='jd', subfmt='decimal')

        To convert to a unit with optional equivalencies, the options are::

          tm.to_value('hr')  # convert to u.hr (hours)
          tm.to_value('hr', [])  # specify equivalencies as a positional arg
          tm.to_value('hr', equivalencies=[])
          tm.to_value(unit='hr', equivalencies=[])

        The built-in `~astropy.time.TimeDelta` options for ``format`` are:
        {'jd', 'sec', 'datetime'}.

        For the two numerical formats 'jd' and 'sec', the available ``subfmt``
        options are: {'float', 'long', 'decimal', 'str', 'bytes'}.  Here,
        'long' uses ``numpy.longdouble`` for somewhat enhanced precision (with
        the enhancement depending on platform), and 'decimal' instances of
        :class:`decimal.Decimal` for full precision.  For the 'str' and
        'bytes' sub-formats, the number of digits is also chosen such that
        time values are represented accurately.  Default: as set by
        ``out_subfmt`` (which by default picks the first available for a given
        format, i.e., 'float').

        Parameters
        ----------
        format : str, optional
            The format in which one wants the `~astropy.time.TimeDelta`
            values.  Default: the current format.
        subfmt : str, optional
            Possible sub-format in which the values should be given.  Default:
            as set by ``out_subfmt`` (which by default picks the first
            available for a given format, i.e., 'float' or 'date_hms').
        unit : `~astropy.units.UnitBase` instance or str, optional
            The unit in which the value should be given.
        equivalencies : list of tuple
            A list of equivalence pairs to try if the units are not directly
            convertible (see :ref:`astropy:unit_equivalencies`).  If `None`,
            no equivalencies will be applied at all, not even any set globally
            or within a context.

        Returns
        -------
        value : ndarray or scalar
            The value in the format or units specified.

        See also
        --------
        to : Convert to a `~astropy.units.Quantity` instance in a given unit.
        value : The time value in the current format.

        """
        if not (args or kwargs):
            raise TypeError('to_value() missing required format or unit argument')

        # TODO: maybe allow 'subfmt' also for units, keeping full precision
        # (effectively, by doing the reverse of quantity_day_frac)?
        # This way, only equivalencies could lead to possible precision loss.
        if ('format' in kwargs
                or (args != () and (args[0] is None or args[0] in self.FORMATS))):
            # Super-class will error with duplicate arguments, etc.
            return super().to_value(*args, **kwargs)

        # With positional arguments, we try parsing the first one as a unit,
        # so that on failure we can give a more informative exception.
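        # Illustrative (hypothetical call): to_value('hr') reaches this point
        # because 'hr' is not a TimeDelta format; it is then parsed as the
        # unit u.hr below.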
if args: try: unit = u.Unit(args[0]) except ValueError as exc: raise ValueError("first argument is not one of the known " "formats ({}) and failed to parse as a unit." .format(list(self.FORMATS))) from exc args = (unit,) + args[1:] return u.Quantity(self._time.jd1 + self._time.jd2, u.day).to_value(*args, **kwargs) def _make_value_equivalent(self, item, value): """Coerce setitem value into an equivalent TimeDelta object""" if not isinstance(value, TimeDelta): try: value = self.__class__(value, scale=self.scale, format=self.format) except Exception as err: raise ValueError('cannot convert value to a compatible TimeDelta ' 'object: {}'.format(err)) return value def isclose(self, other, atol=None, rtol=0.0): """Returns a boolean or boolean array where two TimeDelta objects are element-wise equal within a time tolerance. This effectively evaluates the expression below:: abs(self - other) <= atol + rtol * abs(other) Parameters ---------- other : `~astropy.units.Quantity` or `~astropy.time.TimeDelta` Quantity or TimeDelta object for comparison. atol : `~astropy.units.Quantity` or `~astropy.time.TimeDelta` Absolute tolerance for equality with units of time (e.g. ``u.s`` or ``u.day``). Default is one bit in the 128-bit JD time representation, equivalent to about 20 picosecs. rtol : float Relative tolerance for equality """ try: other_day = other.to_value(u.day) except Exception as err: raise TypeError(f"'other' argument must support conversion to days: {err}") if atol is None: atol = np.finfo(float).eps * u.day if not isinstance(atol, (u.Quantity, TimeDelta)): raise TypeError("'atol' argument must be a Quantity or TimeDelta instance, got " f'{atol.__class__.__name__} instead') return np.isclose(self.to_value(u.day), other_day, rtol=rtol, atol=atol.to_value(u.day)) class ScaleValueError(Exception): pass def _make_array(val, copy=False): """ Take ``val`` and convert/reshape to an array. If ``copy`` is `True` then copy input values. Returns ------- val : ndarray Array version of ``val``. """ if isinstance(val, (tuple, list)) and len(val) > 0 and isinstance(val[0], Time): dtype = object else: dtype = None val = np.array(val, copy=copy, subok=True, dtype=dtype) # Allow only float64, string or object arrays as input # (object is for datetime, maybe add more specific test later?) # This also ensures the right byteorder for float64 (closes #2942). if val.dtype.kind == "f" and val.dtype.itemsize >= np.dtype(np.float64).itemsize: pass elif val.dtype.kind in 'OSUMaV': pass else: val = np.asanyarray(val, dtype=np.float64) return val def _check_for_masked_and_fill(val, val2): """ If ``val`` or ``val2`` are masked arrays then fill them and cast to ndarray. Returns a mask corresponding to the logical-or of masked elements in ``val`` and ``val2``. If neither is masked then the return ``mask`` is ``None``. If either ``val`` or ``val2`` are masked then they are replaced with filled versions of themselves. Parameters ---------- val : ndarray or MaskedArray Input val val2 : ndarray or MaskedArray Input val2 Returns ------- mask, val, val2: ndarray or None Mask: (None or bool ndarray), val, val2: ndarray """ def get_as_filled_ndarray(mask, val): """ Fill the given MaskedArray ``val`` from the first non-masked element in the array. This ensures that upstream Time initialization will succeed. Note that nothing happens if there are no masked elements. """ fill_value = None if np.any(val.mask): # Final mask is the logical-or of inputs mask = mask | val.mask # First unmasked element. 
If all elements are masked then
                # use fill_value=None from above which will use val.fill_value.
                # As long as the user has set this appropriately then all will
                # be fine.
                val_unmasked = val.compressed()  # 1-d ndarray of unmasked values
                if len(val_unmasked) > 0:
                    fill_value = val_unmasked[0]

            # Fill the input ``val``.  If fill_value is None then this just
            # returns an ndarray view of val (no copy).
            val = val.filled(fill_value)

        return mask, val

    mask = False
    if isinstance(val, np.ma.MaskedArray):
        mask, val = get_as_filled_ndarray(mask, val)
    if isinstance(val2, np.ma.MaskedArray):
        mask, val2 = get_as_filled_ndarray(mask, val2)

    return mask, val, val2


class OperandTypeError(TypeError):
    def __init__(self, left, right, op=None):
        op_string = '' if op is None else f' for {op}'
        super().__init__(
            "Unsupported operand type(s){}: "
            "'{}' and '{}'".format(op_string,
                                   left.__class__.__name__,
                                   right.__class__.__name__))


def _check_leapsec():
    global _LEAP_SECONDS_CHECK
    if _LEAP_SECONDS_CHECK != _LeapSecondsCheck.DONE:
        with _LEAP_SECONDS_LOCK:
            # There are three ways we can get here:
            # 1. First call (NOT_STARTED).
            # 2. Re-entrant call (RUNNING).  We skip the initialization
            #    and don't worry about leap second errors.
            # 3. Another thread which raced with the first call
            #    (RUNNING).  The first thread has relinquished the
            #    lock to us, so initialization is complete.
            if _LEAP_SECONDS_CHECK == _LeapSecondsCheck.NOT_STARTED:
                _LEAP_SECONDS_CHECK = _LeapSecondsCheck.RUNNING
                update_leap_seconds()
                _LEAP_SECONDS_CHECK = _LeapSecondsCheck.DONE


def update_leap_seconds(files=None):
    """If the current ERFA leap second table is out of date, try to update it.

    Uses `astropy.utils.iers.LeapSeconds.auto_open` to try to find an
    up-to-date table.  See that routine for the definition of "out of date".

    In order to make it safe to call this any time, all exceptions are turned
    into warnings.

    Parameters
    ----------
    files : list of path-like, optional
        List of files/URLs to attempt to open.  By default, uses the files
        defined by `astropy.utils.iers.LeapSeconds.auto_open`, which includes
        the table used by ERFA itself, so if that is up to date, nothing will
        happen.

    Returns
    -------
    n_update : int
        Number of items updated.

    """
    try:
        from astropy.utils import iers

        table = iers.LeapSeconds.auto_open(files)
        return erfa.leap_seconds.update(table)

    except Exception as exc:
        warn("leap-second auto-update failed due to the following "
             f"exception: {exc!r}", AstropyWarning)
        return 0
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst import fnmatch import time import re import datetime import warnings from decimal import Decimal from collections import OrderedDict, defaultdict import numpy as np import erfa from astropy.utils.decorators import lazyproperty, classproperty from astropy.utils.exceptions import AstropyDeprecationWarning import astropy.units as u from . import _parse_times from . import utils from .utils import day_frac, quantity_day_frac, two_sum, two_product from . import conf __all__ = ['TimeFormat', 'TimeJD', 'TimeMJD', 'TimeFromEpoch', 'TimeUnix', 'TimeUnixTai', 'TimeCxcSec', 'TimeGPS', 'TimeDecimalYear', 'TimePlotDate', 'TimeUnique', 'TimeDatetime', 'TimeString', 'TimeISO', 'TimeISOT', 'TimeFITS', 'TimeYearDayTime', 'TimeEpochDate', 'TimeBesselianEpoch', 'TimeJulianEpoch', 'TimeDeltaFormat', 'TimeDeltaSec', 'TimeDeltaJD', 'TimeEpochDateString', 'TimeBesselianEpochString', 'TimeJulianEpochString', 'TIME_FORMATS', 'TIME_DELTA_FORMATS', 'TimezoneInfo', 'TimeDeltaDatetime', 'TimeDatetime64', 'TimeYMDHMS', 'TimeNumeric', 'TimeDeltaNumeric'] __doctest_skip__ = ['TimePlotDate'] # These both get filled in at end after TimeFormat subclasses defined. # Use an OrderedDict to fix the order in which formats are tried. # This ensures, e.g., that 'isot' gets tried before 'fits'. TIME_FORMATS = OrderedDict() TIME_DELTA_FORMATS = OrderedDict() # Translations between deprecated FITS timescales defined by # Rots et al. 2015, A&A 574:A36, and timescales used here. FITS_DEPRECATED_SCALES = {'TDT': 'tt', 'ET': 'tt', 'GMT': 'utc', 'UT': 'utc', 'IAT': 'tai'} def _regexify_subfmts(subfmts): """ Iterate through each of the sub-formats and try substituting simple regular expressions for the strptime codes for year, month, day-of-month, hour, minute, second. If no % characters remain then turn the final string into a compiled regex. This assumes time formats do not have a % in them. This is done both to speed up parsing of strings and to allow mixed formats where strptime does not quite work well enough. """ new_subfmts = [] for subfmt_tuple in subfmts: subfmt_in = subfmt_tuple[1] if isinstance(subfmt_in, str): for strptime_code, regex in (('%Y', r'(?P<year>\d\d\d\d)'), ('%m', r'(?P<mon>\d{1,2})'), ('%d', r'(?P<mday>\d{1,2})'), ('%H', r'(?P<hour>\d{1,2})'), ('%M', r'(?P<min>\d{1,2})'), ('%S', r'(?P<sec>\d{1,2})')): subfmt_in = subfmt_in.replace(strptime_code, regex) if '%' not in subfmt_in: subfmt_tuple = (subfmt_tuple[0], re.compile(subfmt_in + '$'), subfmt_tuple[2]) new_subfmts.append(subfmt_tuple) return tuple(new_subfmts) class TimeFormat: """ Base class for time representations. Parameters ---------- val1 : numpy ndarray, list, number, str, or bytes Values to initialize the time or times. Bytes are decoded as ascii. val2 : numpy ndarray, list, or number; optional Value(s) to initialize the time or times. Only used for numerical input, to help preserve precision. 
scale : str Time scale of input value(s) precision : int Precision for seconds as floating point in_subfmt : str Select subformat for inputting string times out_subfmt : str Select subformat for outputting string times from_jd : bool If true then val1, val2 are jd1, jd2 """ _default_scale = 'utc' # As of astropy 0.4 subfmts = () _registry = TIME_FORMATS def __init__(self, val1, val2, scale, precision, in_subfmt, out_subfmt, from_jd=False): self.scale = scale # validation of scale done later with _check_scale self.precision = precision self.in_subfmt = in_subfmt self.out_subfmt = out_subfmt self._jd1, self._jd2 = None, None if from_jd: self.jd1 = val1 self.jd2 = val2 else: val1, val2 = self._check_val_type(val1, val2) self.set_jds(val1, val2) def __init_subclass__(cls, **kwargs): # Register time formats that define a name, but leave out astropy_time since # it is not a user-accessible format and is only used for initialization into # a different format. if 'name' in cls.__dict__ and cls.name != 'astropy_time': # FIXME: check here that we're not introducing a collision with # an existing method or attribute; problem is it could be either # astropy.time.Time or astropy.time.TimeDelta, and at the point # where this is run neither of those classes have necessarily been # constructed yet. if 'value' in cls.__dict__ and not hasattr(cls.value, "fget"): raise ValueError("If defined, 'value' must be a property") cls._registry[cls.name] = cls # If this class defines its own subfmts, preprocess the definitions. if 'subfmts' in cls.__dict__: cls.subfmts = _regexify_subfmts(cls.subfmts) return super().__init_subclass__(**kwargs) @classmethod def _get_allowed_subfmt(cls, subfmt): """Get an allowed subfmt for this class, either the input ``subfmt`` if this is valid or '*' as a default. This method gets used in situations where the format of an existing Time object is changing and so the out_ or in_subfmt may need to be coerced to the default '*' if that ``subfmt`` is no longer valid. """ try: cls._select_subfmts(subfmt) except ValueError: subfmt = '*' return subfmt @property def in_subfmt(self): return self._in_subfmt @in_subfmt.setter def in_subfmt(self, subfmt): # Validate subfmt value for this class, raises ValueError if not. self._select_subfmts(subfmt) self._in_subfmt = subfmt @property def out_subfmt(self): return self._out_subfmt @out_subfmt.setter def out_subfmt(self, subfmt): # Validate subfmt value for this class, raises ValueError if not. 
self._select_subfmts(subfmt) self._out_subfmt = subfmt @property def jd1(self): return self._jd1 @jd1.setter def jd1(self, jd1): self._jd1 = _validate_jd_for_storage(jd1) if self._jd2 is not None: self._jd1, self._jd2 = _broadcast_writeable(self._jd1, self._jd2) @property def jd2(self): return self._jd2 @jd2.setter def jd2(self, jd2): self._jd2 = _validate_jd_for_storage(jd2) if self._jd1 is not None: self._jd1, self._jd2 = _broadcast_writeable(self._jd1, self._jd2) def __len__(self): return len(self.jd1) @property def scale(self): """Time scale""" self._scale = self._check_scale(self._scale) return self._scale @scale.setter def scale(self, val): self._scale = val def mask_if_needed(self, value): if self.masked: value = np.ma.array(value, mask=self.mask, copy=False) return value @property def mask(self): if 'mask' not in self.cache: self.cache['mask'] = np.isnan(self.jd2) if self.cache['mask'].shape: self.cache['mask'].flags.writeable = False return self.cache['mask'] @property def masked(self): if 'masked' not in self.cache: self.cache['masked'] = bool(np.any(self.mask)) return self.cache['masked'] @property def jd2_filled(self): return np.nan_to_num(self.jd2) if self.masked else self.jd2 @lazyproperty def cache(self): """ Return the cache associated with this instance. """ return defaultdict(dict) def _check_val_type(self, val1, val2): """Input value validation, typically overridden by derived classes""" # val1 cannot contain nan, but val2 can contain nan isfinite1 = np.isfinite(val1) if val1.size > 1: # Calling .all() on a scalar is surprisingly slow isfinite1 = isfinite1.all() # Note: arr.all() about 3x faster than np.all(arr) elif val1.size == 0: isfinite1 = False ok1 = (val1.dtype.kind == 'f' and val1.dtype.itemsize >= 8 and isfinite1 or val1.size == 0) ok2 = val2 is None or ( val2.dtype.kind == 'f' and val2.dtype.itemsize >= 8 and not np.any(np.isinf(val2))) or val2.size == 0 if not (ok1 and ok2): raise TypeError('Input values for {} class must be finite doubles' .format(self.name)) if getattr(val1, 'unit', None) is not None: # Convert any quantity-likes to days first, attempting to be # careful with the conversion, so that, e.g., large numbers of # seconds get converted without losing precision because # 1/86400 is not exactly representable as a float. val1 = u.Quantity(val1, copy=False) if val2 is not None: val2 = u.Quantity(val2, copy=False) try: val1, val2 = quantity_day_frac(val1, val2) except u.UnitsError: raise u.UnitConversionError( "only quantities with time units can be " "used to instantiate Time instances.") # We now have days, but the format may expect another unit. # On purpose, multiply with 1./day_unit because typically it is # 1./erfa.DAYSEC, and inverting it recovers the integer. # (This conversion will get undone in format's set_jds, hence # there may be room for optimizing this.) factor = 1. / getattr(self, 'unit', 1.) if factor != 1.: val1, carry = two_product(val1, factor) carry += val2 * factor val1, val2 = two_sum(val1, carry) elif getattr(val2, 'unit', None) is not None: raise TypeError('Cannot mix float and Quantity inputs') if val2 is None: val2 = np.array(0, dtype=val1.dtype) def asarray_or_scalar(val): """ Remove ndarray subclasses since for jd1/jd2 we want a pure ndarray or a Python or numpy scalar. """ return np.asarray(val) if isinstance(val, np.ndarray) else val return asarray_or_scalar(val1), asarray_or_scalar(val2) def _check_scale(self, scale): """ Return a validated scale value. 
        If there is a class attribute 'scale' then that defines the default /
        required time scale for this format.  In this case, if a scale value
        was provided it needs to match the class default; otherwise the class
        default is returned.

        Otherwise just make sure that scale is in the allowed list of
        scales.  Provide a different error message if `None` (no value)
        was supplied.
        """
        if scale is None:
            scale = self._default_scale

        if scale not in TIME_SCALES:
            raise ScaleValueError("Scale value '{}' not in "
                                  "allowed values {}"
                                  .format(scale, TIME_SCALES))

        return scale

    def set_jds(self, val1, val2):
        """
        Set internal jd1 and jd2 from val1 and val2.  Must be provided
        by derived classes.
        """
        raise NotImplementedError

    def to_value(self, parent=None, out_subfmt=None):
        """
        Return time representation from internal jd1 and jd2 in specified
        ``out_subfmt``.

        This is the base method that ignores ``parent`` and uses the ``value``
        property to compute the output.  This is done by temporarily setting
        ``self.out_subfmt`` and calling ``self.value``.  This is required for
        legacy Format subclasses prior to astropy 4.0.

        New code should instead implement the value functionality in
        ``to_value()`` and then make the ``value`` property be a simple call to
        ``self.to_value()``.

        Parameters
        ----------
        parent : object
            Parent `~astropy.time.Time` object associated with this
            `~astropy.time.TimeFormat` object
        out_subfmt : str or None
            Output subformat (use existing self.out_subfmt if `None`)

        Returns
        -------
        value : numpy.array, numpy.ma.array
            Array or masked array of formatted time representation values
        """
        # Get value via ``value`` property, overriding out_subfmt temporarily
        # if needed.
        if out_subfmt is not None:
            out_subfmt_orig = self.out_subfmt
            try:
                self.out_subfmt = out_subfmt
                value = self.value
            finally:
                self.out_subfmt = out_subfmt_orig
        else:
            value = self.value

        return self.mask_if_needed(value)

    @property
    def value(self):
        raise NotImplementedError

    @classmethod
    def _select_subfmts(cls, pattern):
        """
        Return a list of subformats where name matches ``pattern`` using
        fnmatch.

        If no subformat matches pattern then a ValueError is raised.  A
        special case is a format with no allowed subformats, i.e.
        subfmts=(), and pattern='*'.  This is OK and happens when this
        method is used for validation of an out_subfmt.
        """
        if not isinstance(pattern, str):
            raise ValueError('subfmt attribute must be a string')
        elif pattern == '*':
            return cls.subfmts

        subfmts = [x for x in cls.subfmts if fnmatch.fnmatchcase(x[0], pattern)]
        if len(subfmts) == 0:
            if len(cls.subfmts) == 0:
                raise ValueError(f'subformat not allowed for format {cls.name}')
            else:
                subfmt_names = [x[0] for x in cls.subfmts]
                raise ValueError(f'subformat {pattern!r} must match one of '
                                 f'{subfmt_names} for format {cls.name}')

        return subfmts


class TimeNumeric(TimeFormat):

    subfmts = (
        ('float', np.float64, None, np.add),
        ('long', np.longdouble, utils.longdouble_to_twoval,
         utils.twoval_to_longdouble),
        ('decimal', np.object_, utils.decimal_to_twoval,
         utils.twoval_to_decimal),
        ('str', np.str_, utils.decimal_to_twoval, utils.twoval_to_string),
        ('bytes', np.bytes_, utils.bytes_to_twoval, utils.twoval_to_bytes),
    )

    def _check_val_type(self, val1, val2):
        """Input value validation, typically overridden by derived classes"""
        # Save original state of val2 because the super()._check_val_type below
        # may change val2 from None to np.array(0).  The value is saved in order
        # to prevent a useless and slow call to np.result_type() below in the
        # most common use-case of providing only val1.
orig_val2_is_none = val2 is None if val1.dtype.kind == 'f': val1, val2 = super()._check_val_type(val1, val2) elif (not orig_val2_is_none or not (val1.dtype.kind in 'US' or (val1.dtype.kind == 'O' and all(isinstance(v, Decimal) for v in val1.flat)))): raise TypeError( 'for {} class, input should be doubles, string, or Decimal, ' 'and second values are only allowed for doubles.' .format(self.name)) val_dtype = (val1.dtype if orig_val2_is_none else np.result_type(val1.dtype, val2.dtype)) subfmts = self._select_subfmts(self.in_subfmt) for subfmt, dtype, convert, _ in subfmts: if np.issubdtype(val_dtype, dtype): break else: raise ValueError('input type not among selected sub-formats.') if convert is not None: try: val1, val2 = convert(val1, val2) except Exception: raise TypeError( 'for {} class, input should be (long) doubles, string, ' 'or Decimal, and second values are only allowed for ' '(long) doubles.'.format(self.name)) return val1, val2 def to_value(self, jd1=None, jd2=None, parent=None, out_subfmt=None): """ Return time representation from internal jd1 and jd2. Subclasses that require ``parent`` or to adjust the jds should override this method. """ # TODO: do this in __init_subclass__? if self.__class__.value.fget is not self.__class__.to_value: return self.value if jd1 is None: jd1 = self.jd1 if jd2 is None: jd2 = self.jd2 if out_subfmt is None: out_subfmt = self.out_subfmt subfmt = self._select_subfmts(out_subfmt)[0] kwargs = {} if subfmt[0] in ('str', 'bytes'): unit = getattr(self, 'unit', 1) digits = int(np.ceil(np.log10(unit / np.finfo(float).eps))) # TODO: allow a way to override the format. kwargs['fmt'] = f'.{digits}f' value = subfmt[3](jd1, jd2, **kwargs) return self.mask_if_needed(value) value = property(to_value) class TimeJD(TimeNumeric): """ Julian Date time format. This represents the number of days since the beginning of the Julian Period. For example, 2451544.5 in JD is midnight on January 1, 2000. """ name = 'jd' def set_jds(self, val1, val2): self._check_scale(self._scale) # Validate scale. self.jd1, self.jd2 = day_frac(val1, val2) class TimeMJD(TimeNumeric): """ Modified Julian Date time format. This represents the number of days since midnight on November 17, 1858. For example, 51544.0 in MJD is midnight on January 1, 2000. """ name = 'mjd' def set_jds(self, val1, val2): self._check_scale(self._scale) # Validate scale. jd1, jd2 = day_frac(val1, val2) jd1 += erfa.DJM0 # erfa.DJM0=2400000.5 (from erfam.h). self.jd1, self.jd2 = day_frac(jd1, jd2) def to_value(self, **kwargs): jd1 = self.jd1 - erfa.DJM0 # This cannot lose precision. jd2 = self.jd2 return super().to_value(jd1=jd1, jd2=jd2, **kwargs) value = property(to_value) class TimeDecimalYear(TimeNumeric): """ Time as a decimal year, with integer values corresponding to midnight of the first day of each year. For example 2000.5 corresponds to the ISO time '2000-07-02 00:00:00'. """ name = 'decimalyear' def set_jds(self, val1, val2): self._check_scale(self._scale) # Validate scale. sum12, err12 = two_sum(val1, val2) iy_start = np.trunc(sum12).astype(int) extra, y_frac = two_sum(sum12, -iy_start) y_frac += extra + err12 val = (val1 + val2).astype(np.double) iy_start = np.trunc(val).astype(int) imon = np.ones_like(iy_start) iday = np.ones_like(iy_start) ihr = np.zeros_like(iy_start) imin = np.zeros_like(iy_start) isec = np.zeros_like(y_frac) # Possible enhancement: use np.unique to only compute start, stop # for unique values of iy_start. 
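        # Worked example (illustrative): val1=2000.5, val2=0 gives
        # iy_start=2000 and y_frac=0.5, so the jd1, jd2 computed below
        # correspond to '2000-07-02 00:00:00' (the midpoint of a 366-day
        # year, as noted in the class docstring).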
        scale = self.scale.upper().encode('ascii')
        jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday,
                                          ihr, imin, isec)
        jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday,
                                      ihr, imin, isec)

        t_start = Time(jd1_start, jd2_start, scale=self.scale, format='jd')
        t_end = Time(jd1_end, jd2_end, scale=self.scale, format='jd')
        t_frac = t_start + (t_end - t_start) * y_frac

        self.jd1, self.jd2 = day_frac(t_frac.jd1, t_frac.jd2)

    def to_value(self, **kwargs):
        scale = self.scale.upper().encode('ascii')
        iy_start, ims, ids, ihmsfs = erfa.d2dtf(scale, 0,  # precision=0
                                                self.jd1, self.jd2_filled)
        imon = np.ones_like(iy_start)
        iday = np.ones_like(iy_start)
        ihr = np.zeros_like(iy_start)
        imin = np.zeros_like(iy_start)
        isec = np.zeros_like(self.jd1)

        # Possible enhancement: use np.unique to only compute start, stop
        # for unique values of iy_start.
        scale = self.scale.upper().encode('ascii')
        jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday,
                                          ihr, imin, isec)
        jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday,
                                      ihr, imin, isec)
        # Trying to be precise, but more than float64 not useful.
        dt = (self.jd1 - jd1_start) + (self.jd2 - jd2_start)
        dt_end = (jd1_end - jd1_start) + (jd2_end - jd2_start)
        decimalyear = iy_start + dt / dt_end

        return super().to_value(jd1=decimalyear, jd2=np.float64(0.0), **kwargs)

    value = property(to_value)


class TimeFromEpoch(TimeNumeric):
    """
    Base class for times that represent the interval from a particular
    epoch as a floating point multiple of a unit time interval (e.g. seconds
    or days).
    """

    @classproperty(lazy=True)
    def _epoch(cls):
        # Ideally we would use `def epoch(cls)` here and not have the instance
        # property below.  However, this breaks the sphinx API docs generation
        # in a way that was not resolved.  See #10406 for details.
        return Time(cls.epoch_val, cls.epoch_val2, scale=cls.epoch_scale,
                    format=cls.epoch_format)

    @property
    def epoch(self):
        """Reference epoch time from which the time interval is measured"""
        return self._epoch

    def set_jds(self, val1, val2):
        """
        Initialize the internal jd1 and jd2 attributes given val1 and val2.
        For a TimeFromEpoch subclass like TimeUnix these will be floats giving
        the effective seconds since an epoch time (e.g. 1970-01-01 00:00:00).
        """
        # Form new JDs based on epoch time + time from epoch (converted to JD).
        # One subtlety that might not be obvious is that 1.000 Julian days in
        # UTC can be 86400 or 86401 seconds.  For the TimeUnix format the
        # assumption is that every day is exactly 86400 seconds, so this is, in
        # principle, doing the math incorrectly, *except* that it matches the
        # definition of Unix time which does not include leap seconds.

        # note: use divisor=1./self.unit, since this is either 1 or 1/86400,
        # and 1/86400 is not exactly representable as a float64, so multiplying
        # by that will cause rounding errors.  (But inverting it as a float64
        # recovers the exact number.)
        day, frac = day_frac(val1, val2, divisor=1. / self.unit)

        jd1 = self.epoch.jd1 + day
        jd2 = self.epoch.jd2 + frac

        # For the usual case that scale is the same as epoch_scale, we only
        # need to ensure that abs(jd2) <= 0.5.  Since abs(self.epoch.jd2) <= 0.5
        # and abs(frac) <= 0.5, we can do simple (fast) checks and arithmetic
        # here without another call to day_frac().  Note also that
        # `round(jd2.item())` is about 10x faster than `np.round(jd2)` for a
        # scalar.
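        # Worked micro-example (illustrative): if jd2 = 0.7 then
        # jd1_extra = 1.0, leaving jd1 += 1 and jd2 = -0.3, so |jd2| <= 0.5.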
if self.epoch.scale == self.scale: jd1_extra = np.round(jd2) if jd2.shape else round(jd2.item()) jd1 += jd1_extra jd2 -= jd1_extra self.jd1, self.jd2 = jd1, jd2 return # Create a temporary Time object corresponding to the new (jd1, jd2) in # the epoch scale (e.g. UTC for TimeUnix) then convert that to the # desired time scale for this object. # # A known limitation is that the transform from self.epoch_scale to # self.scale cannot involve any metadata like lat or lon. try: tm = getattr(Time(jd1, jd2, scale=self.epoch_scale, format='jd'), self.scale) except Exception as err: raise ScaleValueError("Cannot convert from '{}' epoch scale '{}'" "to specified scale '{}', got error:\n{}" .format(self.name, self.epoch_scale, self.scale, err)) from err self.jd1, self.jd2 = day_frac(tm._time.jd1, tm._time.jd2) def to_value(self, parent=None, **kwargs): # Make sure that scale is the same as epoch scale so we can just # subtract the epoch and convert if self.scale != self.epoch_scale: if parent is None: raise ValueError('cannot compute value without parent Time object') try: tm = getattr(parent, self.epoch_scale) except Exception as err: raise ScaleValueError("Cannot convert from '{}' epoch scale '{}'" "to specified scale '{}', got error:\n{}" .format(self.name, self.epoch_scale, self.scale, err)) from err jd1, jd2 = tm._time.jd1, tm._time.jd2 else: jd1, jd2 = self.jd1, self.jd2 # This factor is guaranteed to be exactly representable, which # means time_from_epoch1 is calculated exactly. factor = 1. / self.unit time_from_epoch1 = (jd1 - self.epoch.jd1) * factor time_from_epoch2 = (jd2 - self.epoch.jd2) * factor return super().to_value(jd1=time_from_epoch1, jd2=time_from_epoch2, **kwargs) value = property(to_value) @property def _default_scale(self): return self.epoch_scale class TimeUnix(TimeFromEpoch): """ Unix time (UTC): seconds from 1970-01-01 00:00:00 UTC, ignoring leap seconds. For example, 946684800.0 in Unix time is midnight on January 1, 2000. NOTE: this quantity is not exactly unix time and differs from the strict POSIX definition by up to 1 second on days with a leap second. POSIX unix time actually jumps backward by 1 second at midnight on leap second days while this class value is monotonically increasing at 86400 seconds per UTC day. """ name = 'unix' unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds) epoch_val = '1970-01-01 00:00:00' epoch_val2 = None epoch_scale = 'utc' epoch_format = 'iso' class TimeUnixTai(TimeUnix): """ Unix time (TAI): SI seconds elapsed since 1970-01-01 00:00:00 TAI (see caveats). This will generally differ from standard (UTC) Unix time by the cumulative integral number of leap seconds introduced into UTC since 1972-01-01 UTC plus the initial offset of 10 seconds at that date. This convention matches the definition of linux CLOCK_TAI (https://www.cl.cam.ac.uk/~mgk25/posix-clocks.html), and the Precision Time Protocol (https://en.wikipedia.org/wiki/Precision_Time_Protocol), which is also used by the White Rabbit protocol in High Energy Physics: https://white-rabbit.web.cern.ch. Caveats: - Before 1972, fractional adjustments to UTC were made, so the difference between ``unix`` and ``unix_tai`` time is no longer an integer. - Because of the fractional adjustments, to be very precise, ``unix_tai`` is the number of seconds since ``1970-01-01 00:00:00 TAI`` or equivalently ``1969-12-31 23:59:51.999918 UTC``. The difference between TAI and UTC at that epoch was 8.000082 sec. 
- On the day of a positive leap second the difference between ``unix`` and ``unix_tai`` times increases linearly through the day by 1.0. See also the documentation for the `~astropy.time.TimeUnix` class. - Negative leap seconds are possible, though none have been needed to date. Examples -------- >>> # get the current offset between TAI and UTC >>> from astropy.time import Time >>> t = Time('2020-01-01', scale='utc') >>> t.unix_tai - t.unix 37.0 >>> # Before 1972, the offset between TAI and UTC was not integer >>> t = Time('1970-01-01', scale='utc') >>> t.unix_tai - t.unix # doctest: +FLOAT_CMP 8.000082 >>> # Initial offset of 10 seconds in 1972 >>> t = Time('1972-01-01', scale='utc') >>> t.unix_tai - t.unix 10.0 """ name = 'unix_tai' epoch_val = '1970-01-01 00:00:00' epoch_scale = 'tai' class TimeCxcSec(TimeFromEpoch): """ Chandra X-ray Center seconds from 1998-01-01 00:00:00 TT. For example, 63072064.184 is midnight on January 1, 2000. """ name = 'cxcsec' unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds) epoch_val = '1998-01-01 00:00:00' epoch_val2 = None epoch_scale = 'tt' epoch_format = 'iso' class TimeGPS(TimeFromEpoch): """GPS time: seconds from 1980-01-06 00:00:00 UTC For example, 630720013.0 is midnight on January 1, 2000. Notes ===== This implementation is strictly a representation of the number of seconds (including leap seconds) since midnight UTC on 1980-01-06. GPS can also be considered as a time scale which is ahead of TAI by a fixed offset (to within about 100 nanoseconds). For details, see https://www.usno.navy.mil/USNO/time/gps/usno-gps-time-transfer """ name = 'gps' unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds) epoch_val = '1980-01-06 00:00:19' # above epoch is the same as Time('1980-01-06 00:00:00', scale='utc').tai epoch_val2 = None epoch_scale = 'tai' epoch_format = 'iso' class TimePlotDate(TimeFromEpoch): """ Matplotlib `~matplotlib.pyplot.plot_date` input: 1 + number of days from 0001-01-01 00:00:00 UTC This can be used directly in the matplotlib `~matplotlib.pyplot.plot_date` function:: >>> import matplotlib.pyplot as plt >>> jyear = np.linspace(2000, 2001, 20) >>> t = Time(jyear, format='jyear', scale='utc') >>> plt.plot_date(t.plot_date, jyear) >>> plt.gcf().autofmt_xdate() # orient date labels at a slant >>> plt.draw() For example, 730120.0003703703 is midnight on January 1, 2000. """ # This corresponds to the zero reference time for matplotlib plot_date(). # Note that TAI and UTC are equivalent at the reference time. name = 'plot_date' unit = 1.0 epoch_val = 1721424.5 # Time('0001-01-01 00:00:00', scale='tai').jd - 1 epoch_val2 = None epoch_scale = 'utc' epoch_format = 'jd' @lazyproperty def epoch(self): """Reference epoch time from which the time interval is measured""" try: # Matplotlib >= 3.3 has a get_epoch() function from matplotlib.dates import get_epoch except ImportError: # If no get_epoch() then the epoch is '0001-01-01' _epoch = self._epoch else: # Get the matplotlib date epoch as an ISOT string in UTC epoch_utc = get_epoch() from erfa import ErfaWarning with warnings.catch_warnings(): # Catch possible dubious year warnings from erfa warnings.filterwarnings('ignore', category=ErfaWarning) _epoch = Time(epoch_utc, scale='utc', format='isot') _epoch.format = 'jd' return _epoch class TimeStardate(TimeFromEpoch): """ Stardate: date units from 2318-07-05 12:00:00 UTC. For example, stardate 41153.7 is 00:52 on April 30, 2363. 
See http://trekguide.com/Stardates.htm#TNG for calculations and reference points """ name = 'stardate' unit = 0.397766856 # Stardate units per day epoch_val = '2318-07-05 11:00:00' # Date and time of stardate 00000.00 epoch_val2 = None epoch_scale = 'tai' epoch_format = 'iso' class TimeUnique(TimeFormat): """ Base class for time formats that can uniquely create a time object without requiring an explicit format specifier. This class does nothing but provide inheritance to identify a class as unique. """ class TimeAstropyTime(TimeUnique): """ Instantiate date from an Astropy Time object (or list thereof). This is purely for instantiating from a Time object. The output format is the same as the first time instance. """ name = 'astropy_time' def __new__(cls, val1, val2, scale, precision, in_subfmt, out_subfmt, from_jd=False): """ Use __new__ instead of __init__ to output a class instance that is the same as the class of the first Time object in the list. """ val1_0 = val1.flat[0] if not (isinstance(val1_0, Time) and all(type(val) is type(val1_0) for val in val1.flat)): raise TypeError('Input values for {} class must all be same ' 'astropy Time type.'.format(cls.name)) if scale is None: scale = val1_0.scale if val1.shape: vals = [getattr(val, scale)._time for val in val1] jd1 = np.concatenate([np.atleast_1d(val.jd1) for val in vals]) jd2 = np.concatenate([np.atleast_1d(val.jd2) for val in vals]) # Collect individual location values and merge into a single location. if any(tm.location is not None for tm in val1): if any(tm.location is None for tm in val1): raise ValueError('cannot concatenate times unless all locations ' 'are set or no locations are set') locations = [] for tm in val1: location = np.broadcast_to(tm.location, tm._time.jd1.shape, subok=True) locations.append(np.atleast_1d(location)) location = np.concatenate(locations) else: location = None else: val = getattr(val1_0, scale)._time jd1, jd2 = val.jd1, val.jd2 location = val1_0.location OutTimeFormat = val1_0._time.__class__ self = OutTimeFormat(jd1, jd2, scale, precision, in_subfmt, out_subfmt, from_jd=True) # Make a temporary hidden attribute to transfer location back to the # parent Time object where it needs to live. self._location = location return self class TimeDatetime(TimeUnique): """ Represent date as Python standard library `~datetime.datetime` object Example:: >>> from astropy.time import Time >>> from datetime import datetime >>> t = Time(datetime(2000, 1, 2, 12, 0, 0), scale='utc') >>> t.iso '2000-01-02 12:00:00.000' >>> t.tt.datetime datetime.datetime(2000, 1, 2, 12, 1, 4, 184000) """ name = 'datetime' def _check_val_type(self, val1, val2): if not all(isinstance(val, datetime.datetime) for val in val1.flat): raise TypeError('Input values for {} class must be ' 'datetime objects'.format(self.name)) if val2 is not None: raise ValueError( f'{self.name} objects do not accept a val2 but you provided {val2}') return val1, None def set_jds(self, val1, val2): """Convert datetime object contained in val1 to jd1, jd2""" # Iterate through the datetime objects, getting year, month, etc. iterator = np.nditer([val1, None, None, None, None, None, None], flags=['refs_ok', 'zerosize_ok'], op_dtypes=[None] + 5*[np.intc] + [np.double]) for val, iy, im, id, ihr, imin, dsec in iterator: dt = val.item() if dt.tzinfo is not None: dt = (dt - dt.utcoffset()).replace(tzinfo=None) iy[...] = dt.year im[...] = dt.month id[...] = dt.day ihr[...] = dt.hour imin[...] = dt.minute dsec[...] 
= dt.second + dt.microsecond / 1e6 jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'), *iterator.operands[1:]) self.jd1, self.jd2 = day_frac(jd1, jd2) def to_value(self, timezone=None, parent=None, out_subfmt=None): """ Convert to (potentially timezone-aware) `~datetime.datetime` object. If ``timezone`` is not ``None``, return a timezone-aware datetime object. Parameters ---------- timezone : {`~datetime.tzinfo`, None}, optional If not `None`, return timezone-aware datetime. Returns ------- `~datetime.datetime` If ``timezone`` is not ``None``, output will be timezone-aware. """ if out_subfmt is not None: # Out_subfmt not allowed for this format, so raise the standard # exception by trying to validate the value. self._select_subfmts(out_subfmt) if timezone is not None: if self._scale != 'utc': raise ScaleValueError("scale is {}, must be 'utc' when timezone " "is supplied.".format(self._scale)) # Rather than define a value property directly, we have a function, # since we want to be able to pass in timezone information. scale = self.scale.upper().encode('ascii') iys, ims, ids, ihmsfs = erfa.d2dtf(scale, 6, # 6 for microsec self.jd1, self.jd2_filled) ihrs = ihmsfs['h'] imins = ihmsfs['m'] isecs = ihmsfs['s'] ifracs = ihmsfs['f'] iterator = np.nditer([iys, ims, ids, ihrs, imins, isecs, ifracs, None], flags=['refs_ok', 'zerosize_ok'], op_dtypes=7*[None] + [object]) for iy, im, id, ihr, imin, isec, ifracsec, out in iterator: if isec >= 60: raise ValueError('Time {} is within a leap second but datetime ' 'does not support leap seconds' .format((iy, im, id, ihr, imin, isec, ifracsec))) if timezone is not None: out[...] = datetime.datetime(iy, im, id, ihr, imin, isec, ifracsec, tzinfo=TimezoneInfo()).astimezone(timezone) else: out[...] = datetime.datetime(iy, im, id, ihr, imin, isec, ifracsec) return self.mask_if_needed(iterator.operands[-1]) value = property(to_value) class TimeYMDHMS(TimeUnique): """ ymdhms: A Time format to represent Time as year, month, day, hour, minute, second (thus the name ymdhms). Acceptable inputs must have keys or column names in the "YMDHMS" set of ``year``, ``month``, ``day`` ``hour``, ``minute``, ``second``: - Dict with keys in the YMDHMS set - NumPy structured array, record array or astropy Table, or single row of those types, with column names in the YMDHMS set One can supply a subset of the YMDHMS values, for instance only 'year', 'month', and 'day'. Inputs have the following defaults:: 'month': 1, 'day': 1, 'hour': 0, 'minute': 0, 'second': 0 When the input is supplied as a ``dict`` then each value can be either a scalar value or an array. The values will be broadcast to a common shape. Example:: >>> from astropy.time import Time >>> t = Time({'year': 2015, 'month': 2, 'day': 3, ... 'hour': 12, 'minute': 13, 'second': 14.567}, ... scale='utc') >>> t.iso '2015-02-03 12:13:14.567' >>> t.ymdhms.year 2015 """ name = 'ymdhms' def _check_val_type(self, val1, val2): """ This checks inputs for the YMDHMS format. It is bit more complex than most format checkers because of the flexible input that is allowed. Also, it actually coerces ``val1`` into an appropriate dict of ndarrays that can be used easily by ``set_jds()``. This is useful because it makes it easy to get default values in that routine. 
Parameters ---------- val1 : ndarray or None val2 : ndarray or None Returns ------- val1_as_dict, val2 : val1 as dict or None, val2 is always None """ if val2 is not None: raise ValueError('val2 must be None for ymdhms format') ymdhms = ['year', 'month', 'day', 'hour', 'minute', 'second'] if val1.dtype.names: # Convert to a dict of ndarray val1_as_dict = {name: val1[name] for name in val1.dtype.names} elif val1.shape == (0,): # Input was empty list [], so set to None and set_jds will handle this return None, None elif (val1.dtype.kind == 'O' and val1.shape == () and isinstance(val1.item(), dict)): # Code gets here for input as a dict. The dict input # can be either scalar values or N-d arrays. # Extract the item (which is a dict) and broadcast values to the # same shape here. names = val1.item().keys() values = val1.item().values() val1_as_dict = {name: value for name, value in zip(names, np.broadcast_arrays(*values))} else: raise ValueError('input must be dict or table-like') # Check that the key names now are good. names = val1_as_dict.keys() required_names = ymdhms[:len(names)] def comma_repr(vals): return ', '.join(repr(val) for val in vals) bad_names = set(names) - set(ymdhms) if bad_names: raise ValueError(f'{comma_repr(bad_names)} not allowed as YMDHMS key name(s)') if set(names) != set(required_names): raise ValueError(f'for {len(names)} input key names ' f'you must supply {comma_repr(required_names)}') return val1_as_dict, val2 def set_jds(self, val1, val2): if val1 is None: # Input was empty list [] jd1 = np.array([], dtype=np.float64) jd2 = np.array([], dtype=np.float64) else: jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'), val1['year'], val1.get('month', 1), val1.get('day', 1), val1.get('hour', 0), val1.get('minute', 0), val1.get('second', 0)) self.jd1, self.jd2 = day_frac(jd1, jd2) @property def value(self): scale = self.scale.upper().encode('ascii') iys, ims, ids, ihmsfs = erfa.d2dtf(scale, 9, self.jd1, self.jd2_filled) out = np.empty(self.jd1.shape, dtype=[('year', 'i4'), ('month', 'i4'), ('day', 'i4'), ('hour', 'i4'), ('minute', 'i4'), ('second', 'f8')]) out['year'] = iys out['month'] = ims out['day'] = ids out['hour'] = ihmsfs['h'] out['minute'] = ihmsfs['m'] out['second'] = ihmsfs['s'] + ihmsfs['f'] * 10**(-9) out = out.view(np.recarray) return self.mask_if_needed(out) class TimezoneInfo(datetime.tzinfo): """ Subclass of the `~datetime.tzinfo` object, used in the to_datetime method to specify timezones. It may be safer in most cases to use a timezone database package like pytz rather than defining your own timezones - this class is mainly a workaround for users without pytz. """ @u.quantity_input(utc_offset=u.day, dst=u.day) def __init__(self, utc_offset=0 * u.day, dst=0 * u.day, tzname=None): """ Parameters ---------- utc_offset : `~astropy.units.Quantity`, optional Offset from UTC in days. Defaults to zero. dst : `~astropy.units.Quantity`, optional Daylight Savings Time offset in days. Defaults to zero (no daylight savings). 
tzname : str or None, optional Name of timezone Examples -------- >>> from datetime import datetime >>> from astropy.time import TimezoneInfo # Specifies a timezone >>> import astropy.units as u >>> utc = TimezoneInfo() # Defaults to UTC >>> utc_plus_one_hour = TimezoneInfo(utc_offset=1*u.hour) # UTC+1 >>> dt_aware = datetime(2000, 1, 1, 0, 0, 0, tzinfo=utc_plus_one_hour) >>> print(dt_aware) 2000-01-01 00:00:00+01:00 >>> print(dt_aware.astimezone(utc)) 1999-12-31 23:00:00+00:00 """ if utc_offset == 0 and dst == 0 and tzname is None: tzname = 'UTC' self._utcoffset = datetime.timedelta(utc_offset.to_value(u.day)) self._tzname = tzname self._dst = datetime.timedelta(dst.to_value(u.day)) def utcoffset(self, dt): return self._utcoffset def tzname(self, dt): return str(self._tzname) def dst(self, dt): return self._dst class TimeString(TimeUnique): """ Base class for string-like time representations. This class assumes that anything following the last decimal point to the right is a fraction of a second. **Fast C-based parser** Time format classes can take advantage of a fast C-based parser if the times are represented as fixed-format strings with year, month, day-of-month, hour, minute, second, OR year, day-of-year, hour, minute, second. This can be a factor of 20 or more faster than the pure Python parser. Fixed format means that the components always have the same number of characters. The Python parser will accept ``2001-9-2`` as a date, but the C parser would require ``2001-09-02``. A subclass in this case must define a class attribute ``fast_parser_pars`` which is a `dict` with all of the keys below. An inherited attribute is not checked, only an attribute in the class ``__dict__``. - ``delims`` (tuple of int): ASCII code for character at corresponding ``starts`` position (0 => no character) - ``starts`` (tuple of int): position where component starts (including delimiter if present). Use -1 for the month component for format that use day of year. - ``stops`` (tuple of int): position where component ends. Use -1 to continue to end of string, or for the month component for formats that use day of year. - ``break_allowed`` (tuple of int): if true (1) then the time string can legally end just before the corresponding component (e.g. "2000-01-01" is a valid time but "2000-01-01 12" is not). - ``has_day_of_year`` (int): 0 if dates have year, month, day; 1 if year, day-of-year """ def __init_subclass__(cls, **kwargs): if 'fast_parser_pars' in cls.__dict__: fpp = cls.fast_parser_pars fpp = np.array(list(zip(map(chr, fpp['delims']), fpp['starts'], fpp['stops'], fpp['break_allowed'])), _parse_times.dt_pars) if cls.fast_parser_pars['has_day_of_year']: fpp['start'][1] = fpp['stop'][1] = -1 cls._fast_parser = _parse_times.create_parser(fpp) super().__init_subclass__(**kwargs) def _check_val_type(self, val1, val2): if val1.dtype.kind not in ('S', 'U') and val1.size: raise TypeError(f'Input values for {self.name} class must be strings') if val2 is not None: raise ValueError( f'{self.name} objects do not accept a val2 but you provided {val2}') return val1, None def parse_string(self, timestr, subfmts): """Read time from a single string, using a set of possible formats.""" # Datetime components required for conversion to JD by ERFA, along # with the default values. components = ('year', 'mon', 'mday', 'hour', 'min', 'sec') defaults = (None, 1, 1, 0, 0, 0) # Assume that anything following "." on the right side is a # floating fraction of a second. 
try: idot = timestr.rindex('.') except Exception: fracsec = 0.0 else: timestr, fracsec = timestr[:idot], timestr[idot:] fracsec = float(fracsec) for _, strptime_fmt_or_regex, _ in subfmts: if isinstance(strptime_fmt_or_regex, str): try: tm = time.strptime(timestr, strptime_fmt_or_regex) except ValueError: continue else: vals = [getattr(tm, 'tm_' + component) for component in components] else: tm = re.match(strptime_fmt_or_regex, timestr) if tm is None: continue tm = tm.groupdict() vals = [int(tm.get(component, default)) for component, default in zip(components, defaults)] # Add fractional seconds vals[-1] = vals[-1] + fracsec return vals else: raise ValueError(f'Time {timestr} does not match {self.name} format') def set_jds(self, val1, val2): """Parse the time strings contained in val1 and set jd1, jd2""" # If specific input subformat is required then use the Python parser. # Also do this if Time format class does not define `use_fast_parser` or # if the fast parser is entirely disabled. Note that `use_fast_parser` # is ignored for format classes that don't have a fast parser. if (self.in_subfmt != '*' or '_fast_parser' not in self.__class__.__dict__ or conf.use_fast_parser == 'False'): jd1, jd2 = self.get_jds_python(val1, val2) else: try: jd1, jd2 = self.get_jds_fast(val1, val2) except Exception: # Fall through to the Python parser unless fast is forced. if conf.use_fast_parser == 'force': raise else: jd1, jd2 = self.get_jds_python(val1, val2) self.jd1 = jd1 self.jd2 = jd2 def get_jds_python(self, val1, val2): """Parse the time strings contained in val1 and get jd1, jd2""" # Select subformats based on current self.in_subfmt subfmts = self._select_subfmts(self.in_subfmt) # Be liberal in what we accept: convert bytes to ascii. # Here .item() is needed for arrays with entries of unequal length, # to strip trailing 0 bytes. to_string = (str if val1.dtype.kind == 'U' else lambda x: str(x.item(), encoding='ascii')) iterator = np.nditer([val1, None, None, None, None, None, None], flags=['zerosize_ok'], op_dtypes=[None] + 5 * [np.intc] + [np.double]) for val, iy, im, id, ihr, imin, dsec in iterator: val = to_string(val) iy[...], im[...], id[...], ihr[...], imin[...], dsec[...] = ( self.parse_string(val, subfmts)) jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'), *iterator.operands[1:]) jd1, jd2 = day_frac(jd1, jd2) return jd1, jd2 def get_jds_fast(self, val1, val2): """Use fast C parser to parse time strings in val1 and get jd1, jd2""" # Handle bytes or str input and convert to uint8. We need to the # dtype _parse_times.dt_u1 instead of uint8, since otherwise it is # not possible to create a gufunc with structured dtype output. # See note about ufunc type resolver in pyerfa/erfa/ufunc.c.templ. if val1.dtype.kind == 'U': # Note: val1.astype('S') is *very* slow, so we check ourselves # that the input is pure ASCII. val1_uint32 = val1.view((np.uint32, val1.dtype.itemsize // 4)) if np.any(val1_uint32 > 127): raise ValueError('input is not pure ASCII') # It might be possible to avoid making a copy via astype with # cleverness in parse_times.c but leave that for another day. chars = val1_uint32.astype(_parse_times.dt_u1) else: chars = val1.view((_parse_times.dt_u1, val1.dtype.itemsize)) # Call the fast parsing ufunc. 
time_struct = self._fast_parser(chars) jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'), time_struct['year'], time_struct['month'], time_struct['day'], time_struct['hour'], time_struct['minute'], time_struct['second']) return day_frac(jd1, jd2) def str_kwargs(self): """ Generator that yields a dict of values corresponding to the calendar date and time for the internal JD values. """ scale = self.scale.upper().encode('ascii'), iys, ims, ids, ihmsfs = erfa.d2dtf(scale, self.precision, self.jd1, self.jd2_filled) # Get the str_fmt element of the first allowed output subformat _, _, str_fmt = self._select_subfmts(self.out_subfmt)[0] yday = None has_yday = '{yday:' in str_fmt ihrs = ihmsfs['h'] imins = ihmsfs['m'] isecs = ihmsfs['s'] ifracs = ihmsfs['f'] for iy, im, id, ihr, imin, isec, ifracsec in np.nditer( [iys, ims, ids, ihrs, imins, isecs, ifracs], flags=['zerosize_ok']): if has_yday: yday = datetime.datetime(iy, im, id).timetuple().tm_yday yield {'year': int(iy), 'mon': int(im), 'day': int(id), 'hour': int(ihr), 'min': int(imin), 'sec': int(isec), 'fracsec': int(ifracsec), 'yday': yday} def format_string(self, str_fmt, **kwargs): """Write time to a string using a given format. By default, just interprets str_fmt as a format string, but subclasses can add to this. """ return str_fmt.format(**kwargs) @property def value(self): # Select the first available subformat based on current # self.out_subfmt subfmts = self._select_subfmts(self.out_subfmt) _, _, str_fmt = subfmts[0] # TODO: fix this ugly hack if self.precision > 0 and str_fmt.endswith('{sec:02d}'): str_fmt += '.{fracsec:0' + str(self.precision) + 'd}' # Try to optimize this later. Can't pre-allocate because length of # output could change, e.g. year rolls from 999 to 1000. outs = [] for kwargs in self.str_kwargs(): outs.append(str(self.format_string(str_fmt, **kwargs))) return np.array(outs).reshape(self.jd1.shape) class TimeISO(TimeString): """ ISO 8601 compliant date-time format "YYYY-MM-DD HH:MM:SS.sss...". For example, 2000-01-01 00:00:00.000 is midnight on January 1, 2000. The allowed subformats are: - 'date_hms': date + hours, mins, secs (and optional fractional secs) - 'date_hm': date + hours, mins - 'date': date """ name = 'iso' subfmts = (('date_hms', '%Y-%m-%d %H:%M:%S', # XXX To Do - use strftime for output ?? '{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}:{sec:02d}'), ('date_hm', '%Y-%m-%d %H:%M', '{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}'), ('date', '%Y-%m-%d', '{year:d}-{mon:02d}-{day:02d}')) # Define positions and starting delimiter for year, month, day, hour, # minute, seconds components of an ISO time. This is used by the fast # C-parser parse_ymdhms_times() # # "2000-01-12 13:14:15.678" # 01234567890123456789012 # yyyy-mm-dd hh:mm:ss.fff # Parsed as ('yyyy', '-mm', '-dd', ' hh', ':mm', ':ss', '.fff') fast_parser_pars = dict( delims=(0, ord('-'), ord('-'), ord(' '), ord(':'), ord(':'), ord('.')), starts=(0, 4, 7, 10, 13, 16, 19), stops=(3, 6, 9, 12, 15, 18, -1), # Break allowed *before* # y m d h m s f break_allowed=(0, 0, 0, 1, 0, 1, 1), has_day_of_year=0) def parse_string(self, timestr, subfmts): # Handle trailing 'Z' for UTC time if timestr.endswith('Z'): if self.scale != 'utc': raise ValueError("Time input terminating in 'Z' must have " "scale='UTC'") timestr = timestr[:-1] return super().parse_string(timestr, subfmts) class TimeISOT(TimeISO): """ ISO 8601 compliant date-time format "YYYY-MM-DDTHH:MM:SS.sss...". 
This is the same as TimeISO except for a "T" instead of space between the date and time. For example, 2000-01-01T00:00:00.000 is midnight on January 1, 2000. The allowed subformats are: - 'date_hms': date + hours, mins, secs (and optional fractional secs) - 'date_hm': date + hours, mins - 'date': date """ name = 'isot' subfmts = (('date_hms', '%Y-%m-%dT%H:%M:%S', '{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'), ('date_hm', '%Y-%m-%dT%H:%M', '{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}'), ('date', '%Y-%m-%d', '{year:d}-{mon:02d}-{day:02d}')) # See TimeISO for explanation fast_parser_pars = dict( delims=(0, ord('-'), ord('-'), ord('T'), ord(':'), ord(':'), ord('.')), starts=(0, 4, 7, 10, 13, 16, 19), stops=(3, 6, 9, 12, 15, 18, -1), # Break allowed *before* # y m d h m s f break_allowed=(0, 0, 0, 1, 0, 1, 1), has_day_of_year=0) class TimeYearDayTime(TimeISO): """ Year, day-of-year and time as "YYYY:DOY:HH:MM:SS.sss...". The day-of-year (DOY) goes from 001 to 365 (366 in leap years). For example, 2000:001:00:00:00.000 is midnight on January 1, 2000. The allowed subformats are: - 'date_hms': date + hours, mins, secs (and optional fractional secs) - 'date_hm': date + hours, mins - 'date': date """ name = 'yday' subfmts = (('date_hms', '%Y:%j:%H:%M:%S', '{year:d}:{yday:03d}:{hour:02d}:{min:02d}:{sec:02d}'), ('date_hm', '%Y:%j:%H:%M', '{year:d}:{yday:03d}:{hour:02d}:{min:02d}'), ('date', '%Y:%j', '{year:d}:{yday:03d}')) # Define positions and starting delimiter for year, month, day, hour, # minute, seconds components of an ISO time. This is used by the fast # C-parser parse_ymdhms_times() # # "2000:123:13:14:15.678" # 012345678901234567890 # yyyy:ddd:hh:mm:ss.fff # Parsed as ('yyyy', ':ddd', ':hh', ':mm', ':ss', '.fff') # # delims: character at corresponding `starts` position (0 => no character) # starts: position where component starts (including delimiter if present) # stops: position where component ends (-1 => continue to end of string) fast_parser_pars = dict( delims=(0, 0, ord(':'), ord(':'), ord(':'), ord(':'), ord('.')), starts=(0, -1, 4, 8, 11, 14, 17), stops=(3, -1, 7, 10, 13, 16, -1), # Break allowed before: # y m d h m s f break_allowed=(0, 0, 0, 1, 0, 1, 1), has_day_of_year=1) class TimeDatetime64(TimeISOT): name = 'datetime64' def _check_val_type(self, val1, val2): if not val1.dtype.kind == 'M': if val1.size > 0: raise TypeError('Input values for {} class must be ' 'datetime64 objects'.format(self.name)) else: val1 = np.array([], 'datetime64[D]') if val2 is not None: raise ValueError( f'{self.name} objects do not accept a val2 but you provided {val2}') return val1, None def set_jds(self, val1, val2): # If there are any masked values in the ``val1`` datetime64 array # ('NaT') then stub them with a valid date so downstream parse_string # will work. The value under the mask is arbitrary but a "modern" date # is good. mask = np.isnat(val1) masked = np.any(mask) if masked: val1 = val1.copy() val1[mask] = '2000' # Make sure M(onth) and Y(ear) dates will parse and convert to bytestring if val1.dtype.name in ['datetime64[M]', 'datetime64[Y]']: val1 = val1.astype('datetime64[D]') val1 = val1.astype('S') # Standard ISO string parsing now super().set_jds(val1, val2) # Finally apply mask if necessary if masked: self.jd2[mask] = np.nan @property def value(self): precision = self.precision self.precision = 9 ret = super().value self.precision = precision return ret.astype('datetime64') class TimeFITS(TimeString): """ FITS format: "[±Y]YYYY-MM-DD[THH:MM:SS[.sss]]". 
ISOT but can give signed five-digit year (mostly for negative years); The allowed subformats are: - 'date_hms': date + hours, mins, secs (and optional fractional secs) - 'date': date - 'longdate_hms': as 'date_hms', but with signed 5-digit year - 'longdate': as 'date', but with signed 5-digit year See Rots et al., 2015, A&A 574:A36 (arXiv:1409.7583). """ name = 'fits' subfmts = ( ('date_hms', (r'(?P<year>\d{4})-(?P<mon>\d\d)-(?P<mday>\d\d)T' r'(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d(\.\d*)?)'), '{year:04d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'), ('date', r'(?P<year>\d{4})-(?P<mon>\d\d)-(?P<mday>\d\d)', '{year:04d}-{mon:02d}-{day:02d}'), ('longdate_hms', (r'(?P<year>[+-]\d{5})-(?P<mon>\d\d)-(?P<mday>\d\d)T' r'(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d(\.\d*)?)'), '{year:+06d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'), ('longdate', r'(?P<year>[+-]\d{5})-(?P<mon>\d\d)-(?P<mday>\d\d)', '{year:+06d}-{mon:02d}-{day:02d}')) # Add the regex that parses the scale and possible realization. # Support for this is deprecated. Read old style but no longer write # in this style. subfmts = tuple( (subfmt[0], subfmt[1] + r'(\((?P<scale>\w+)(\((?P<realization>\w+)\))?\))?', subfmt[2]) for subfmt in subfmts) def parse_string(self, timestr, subfmts): """Read time and deprecated scale if present""" # Try parsing with any of the allowed sub-formats. for _, regex, _ in subfmts: tm = re.match(regex, timestr) if tm: break else: raise ValueError(f'Time {timestr} does not match {self.name} format') tm = tm.groupdict() # Scale and realization are deprecated and strings in this form # are no longer created. We issue a warning but still use the value. if tm['scale'] is not None: warnings.warn("FITS time strings should no longer have embedded time scale.", AstropyDeprecationWarning) # If a scale was given, translate from a possible deprecated # timescale identifier to the scale used by Time. fits_scale = tm['scale'].upper() scale = FITS_DEPRECATED_SCALES.get(fits_scale, fits_scale.lower()) if scale not in TIME_SCALES: raise ValueError("Scale {!r} is not in the allowed scales {}" .format(scale, sorted(TIME_SCALES))) # If no scale was given in the initialiser, set the scale to # that given in the string. Realization is ignored # and is only supported to allow old-style strings to be # parsed. if self._scale is None: self._scale = scale if scale != self.scale: raise ValueError("Input strings for {} class must all " "have consistent time scales." .format(self.name)) return [int(tm['year']), int(tm['mon']), int(tm['mday']), int(tm.get('hour', 0)), int(tm.get('min', 0)), float(tm.get('sec', 0.))] @property def value(self): """Convert times to strings, using signed 5 digit if necessary.""" if 'long' not in self.out_subfmt: # If we have times before year 0 or after year 9999, we can # output only in a "long" format, using signed 5-digit years. jd = self.jd1 + self.jd2 if jd.size and (jd.min() < 1721425.5 or jd.max() >= 5373484.5): self.out_subfmt = 'long' + self.out_subfmt return super().value class TimeEpochDate(TimeNumeric): """ Base class for support floating point Besselian and Julian epoch dates """ _default_scale = 'tt' # As of astropy 3.2, this is no longer 'utc'. def set_jds(self, val1, val2): self._check_scale(self._scale) # validate scale. 
epoch_to_jd = getattr(erfa, self.epoch_to_jd) jd1, jd2 = epoch_to_jd(val1 + val2) self.jd1, self.jd2 = day_frac(jd1, jd2) def to_value(self, **kwargs): jd_to_epoch = getattr(erfa, self.jd_to_epoch) value = jd_to_epoch(self.jd1, self.jd2) return super().to_value(jd1=value, jd2=np.float64(0.0), **kwargs) value = property(to_value) class TimeBesselianEpoch(TimeEpochDate): """Besselian Epoch year as floating point value(s) like 1950.0""" name = 'byear' epoch_to_jd = 'epb2jd' jd_to_epoch = 'epb' def _check_val_type(self, val1, val2): """Input value validation, typically overridden by derived classes""" if hasattr(val1, 'to') and hasattr(val1, 'unit') and val1.unit is not None: raise ValueError("Cannot use Quantities for 'byear' format, " "as the interpretation would be ambiguous. " "Use float with Besselian year instead. ") # FIXME: is val2 really okay here? return super()._check_val_type(val1, val2) class TimeJulianEpoch(TimeEpochDate): """Julian Epoch year as floating point value(s) like 2000.0""" name = 'jyear' unit = erfa.DJY # 365.25, the Julian year, for conversion to quantities epoch_to_jd = 'epj2jd' jd_to_epoch = 'epj' class TimeEpochDateString(TimeString): """ Base class to support string Besselian and Julian epoch dates such as 'B1950.0' or 'J2000.0' respectively. """ _default_scale = 'tt' # As of astropy 3.2, this is no longer 'utc'. def set_jds(self, val1, val2): epoch_prefix = self.epoch_prefix # Be liberal in what we accept: convert bytes to ascii. to_string = (str if val1.dtype.kind == 'U' else lambda x: str(x.item(), encoding='ascii')) iterator = np.nditer([val1, None], op_dtypes=[val1.dtype, np.double], flags=['zerosize_ok']) for val, years in iterator: try: time_str = to_string(val) epoch_type, year_str = time_str[0], time_str[1:] year = float(year_str) if epoch_type.upper() != epoch_prefix: raise ValueError except (IndexError, ValueError, UnicodeEncodeError): raise ValueError(f'Time {val} does not match {self.name} format') else: years[...] = year self._check_scale(self._scale) # validate scale. epoch_to_jd = getattr(erfa, self.epoch_to_jd) jd1, jd2 = epoch_to_jd(iterator.operands[-1]) self.jd1, self.jd2 = day_frac(jd1, jd2) @property def value(self): jd_to_epoch = getattr(erfa, self.jd_to_epoch) years = jd_to_epoch(self.jd1, self.jd2) # Use old-style format since it is a factor of 2 faster str_fmt = self.epoch_prefix + '%.' + str(self.precision) + 'f' outs = [str_fmt % year for year in years.flat] return np.array(outs).reshape(self.jd1.shape) class TimeBesselianEpochString(TimeEpochDateString): """Besselian Epoch year as string value(s) like 'B1950.0'""" name = 'byear_str' epoch_to_jd = 'epb2jd' jd_to_epoch = 'epb' epoch_prefix = 'B' class TimeJulianEpochString(TimeEpochDateString): """Julian Epoch year as string value(s) like 'J2000.0'""" name = 'jyear_str' epoch_to_jd = 'epj2jd' jd_to_epoch = 'epj' epoch_prefix = 'J' class TimeDeltaFormat(TimeFormat): """Base class for time delta representations""" _registry = TIME_DELTA_FORMATS def _check_scale(self, scale): """ Check that the scale is in the allowed list of scales, or is `None` """ if scale is not None and scale not in TIME_DELTA_SCALES: raise ScaleValueError("Scale value '{}' not in " "allowed values {}" .format(scale, TIME_DELTA_SCALES)) return scale class TimeDeltaNumeric(TimeDeltaFormat, TimeNumeric): def set_jds(self, val1, val2): self._check_scale(self._scale) # Validate scale. self.jd1, self.jd2 = day_frac(val1, val2, divisor=1. 
/ self.unit) def to_value(self, **kwargs): # Note that 1/unit is always exactly representable, so the # following multiplications are exact. factor = 1. / self.unit jd1 = self.jd1 * factor jd2 = self.jd2 * factor return super().to_value(jd1=jd1, jd2=jd2, **kwargs) value = property(to_value) class TimeDeltaSec(TimeDeltaNumeric): """Time delta in SI seconds""" name = 'sec' unit = 1. / erfa.DAYSEC # for quantity input class TimeDeltaJD(TimeDeltaNumeric): """Time delta in Julian days (86400 SI seconds)""" name = 'jd' unit = 1. class TimeDeltaDatetime(TimeDeltaFormat, TimeUnique): """Time delta in datetime.timedelta""" name = 'datetime' def _check_val_type(self, val1, val2): if not all(isinstance(val, datetime.timedelta) for val in val1.flat): raise TypeError('Input values for {} class must be ' 'datetime.timedelta objects'.format(self.name)) if val2 is not None: raise ValueError( f'{self.name} objects do not accept a val2 but you provided {val2}') return val1, None def set_jds(self, val1, val2): self._check_scale(self._scale) # Validate scale. iterator = np.nditer([val1, None, None], flags=['refs_ok', 'zerosize_ok'], op_dtypes=[None, np.double, np.double]) day = datetime.timedelta(days=1) for val, jd1, jd2 in iterator: jd1[...], other = divmod(val.item(), day) jd2[...] = other / day self.jd1, self.jd2 = day_frac(iterator.operands[-2], iterator.operands[-1]) @property def value(self): iterator = np.nditer([self.jd1, self.jd2, None], flags=['refs_ok', 'zerosize_ok'], op_dtypes=[None, None, object]) for jd1, jd2, out in iterator: jd1_, jd2_ = day_frac(jd1, jd2) out[...] = datetime.timedelta(days=jd1_, microseconds=jd2_ * 86400 * 1e6) return self.mask_if_needed(iterator.operands[-1]) def _validate_jd_for_storage(jd): if isinstance(jd, (float, int)): return np.array(jd, dtype=np.float_) if (isinstance(jd, np.generic) and (jd.dtype.kind == 'f' and jd.dtype.itemsize <= 8 or jd.dtype.kind in 'iu')): return np.array(jd, dtype=np.float_) elif (isinstance(jd, np.ndarray) and jd.dtype.kind == 'f' and jd.dtype.itemsize == 8): return jd else: raise TypeError( f"JD values must be arrays (possibly zero-dimensional) " f"of floats but we got {jd!r} of type {type(jd)}") def _broadcast_writeable(jd1, jd2): if jd1.shape == jd2.shape: return jd1, jd2 # When using broadcast_arrays, *both* are flagged with # warn-on-write, even the one that wasn't modified, and # require "C" only clears the flag if it actually copied # anything. shape = np.broadcast(jd1, jd2).shape if jd1.shape == shape: s_jd1 = jd1 else: s_jd1 = np.require(np.broadcast_to(jd1, shape), requirements=["C", "W"]) if jd2.shape == shape: s_jd2 = jd2 else: s_jd2 = np.require(np.broadcast_to(jd2, shape), requirements=["C", "W"]) return s_jd1, s_jd2 # Import symbols from core.py that are used in this module. This succeeds # because __init__.py imports format.py just before core.py. from .core import Time, TIME_SCALES, TIME_DELTA_SCALES, ScaleValueError # noqa
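# Usage sketch (illustrative; not part of the module above). A minimal
# check of the format classes defined there, driven through the public
# ``Time`` API; the expected values follow from the format docstrings
# (e.g. JD 2451544.5 is midnight on January 1, 2000).
from astropy.time import Time

t = Time(2451544.5, format='jd', scale='utc')
print(t.mjd)          # 51544.0 (TimeMJD: jd - erfa.DJM0)
print(t.unix)         # 946684800.0 (TimeUnix: seconds from the 1970 epoch)
print(t.decimalyear)  # 2000.0 (TimeDecimalYear)
print(t.iso)          # '2000-01-01 00:00:00.000' (TimeISO)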
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst __all__ = ['quantity_input'] import inspect from collections.abc import Sequence from functools import wraps from numbers import Number import numpy as np from . import _typing as T from .core import Unit, UnitBase, UnitsError, add_enabled_equivalencies, dimensionless_unscaled from .function.core import FunctionUnitBase from .physical import PhysicalType, get_physical_type from .quantity import Quantity from .structured import StructuredUnit NoneType = type(None) def _get_allowed_units(targets): """ From a list of target units (either as strings or unit objects) and physical types, return a list of Unit objects. """ allowed_units = [] for target in targets: try: unit = Unit(target) except (TypeError, ValueError): try: unit = get_physical_type(target)._unit except (TypeError, ValueError, KeyError): # KeyError for Enum raise ValueError(f"Invalid unit or physical type {target!r}.") from None allowed_units.append(unit) return allowed_units def _validate_arg_value(param_name, func_name, arg, targets, equivalencies, strict_dimensionless=False): """ Validates the object passed in to the wrapped function, ``arg``, with target unit or physical type, ``target``. """ if len(targets) == 0: return allowed_units = _get_allowed_units(targets) # If dimensionless is an allowed unit and the argument is unit-less, # allow numbers or numpy arrays with numeric dtypes if (dimensionless_unscaled in allowed_units and not strict_dimensionless and not hasattr(arg, "unit")): if isinstance(arg, Number): return elif (isinstance(arg, np.ndarray) and np.issubdtype(arg.dtype, np.number)): return for allowed_unit in allowed_units: try: is_equivalent = arg.unit.is_equivalent(allowed_unit, equivalencies=equivalencies) if is_equivalent: break except AttributeError: # Either there is no .unit or no .is_equivalent if hasattr(arg, "unit"): error_msg = ("a 'unit' attribute without an 'is_equivalent' method") else: error_msg = "no 'unit' attribute" raise TypeError(f"Argument '{param_name}' to function '{func_name}'" f" has {error_msg}. You should pass in an astropy " "Quantity instead.") else: error_msg = (f"Argument '{param_name}' to function '{func_name}' must " "be in units convertible to") if len(targets) > 1: targ_names = ", ".join([f"'{str(targ)}'" for targ in targets]) raise UnitsError(f"{error_msg} one of: {targ_names}.") else: raise UnitsError(f"{error_msg} '{str(targets[0])}'.") def _parse_annotation(target): if target in (None, NoneType, inspect._empty): return target # check if unit-like try: unit = Unit(target) except (TypeError, ValueError): try: ptype = get_physical_type(target) except (TypeError, ValueError, KeyError): # KeyError for Enum if isinstance(target, str): raise ValueError(f"invalid unit or physical type {target!r}.") from None else: return ptype else: return unit # could be a type hint origin = T.get_origin(target) if origin is T.Union: return [_parse_annotation(t) for t in T.get_args(target)] elif origin is not T.Annotated: # can't be Quantity[] return False # parse type hint cls, *annotations = T.get_args(target) if not issubclass(cls, Quantity) or not annotations: return False # get unit from type hint unit, *rest = annotations if not isinstance(unit, (UnitBase, PhysicalType)): return False return unit class QuantityInput: @classmethod def as_decorator(cls, func=None, **kwargs): r""" A decorator for validating the units of arguments to functions. 
Unit specifications can be provided as keyword arguments to the decorator, or by using function annotation syntax. Arguments to the decorator take precedence over any function annotations present. A `~astropy.units.UnitsError` will be raised if the unit attribute of the argument is not equivalent to the unit specified to the decorator or in the annotation. If the argument has no unit attribute, i.e. it is not a Quantity object, a `ValueError` will be raised unless the argument is an annotation. This is to allow non Quantity annotations to pass through. Where an equivalency is specified in the decorator, the function will be executed with that equivalency in force. Notes ----- The checking of arguments inside variable arguments to a function is not supported (i.e. \*arg or \**kwargs). The original function is accessible by the attributed ``__wrapped__``. See :func:`functools.wraps` for details. Examples -------- .. code-block:: python import astropy.units as u @u.quantity_input(myangle=u.arcsec) def myfunction(myangle): return myangle**2 .. code-block:: python import astropy.units as u @u.quantity_input def myfunction(myangle: u.arcsec): return myangle**2 Or using a unit-aware Quantity annotation. .. code-block:: python @u.quantity_input def myfunction(myangle: u.Quantity[u.arcsec]): return myangle**2 Also you can specify a return value annotation, which will cause the function to always return a `~astropy.units.Quantity` in that unit. .. code-block:: python import astropy.units as u @u.quantity_input def myfunction(myangle: u.arcsec) -> u.deg**2: return myangle**2 Using equivalencies:: import astropy.units as u @u.quantity_input(myenergy=u.eV, equivalencies=u.mass_energy()) def myfunction(myenergy): return myenergy**2 """ self = cls(**kwargs) if func is not None and not kwargs: return self(func) else: return self def __init__(self, func=None, strict_dimensionless=False, **kwargs): self.equivalencies = kwargs.pop('equivalencies', []) self.decorator_kwargs = kwargs self.strict_dimensionless = strict_dimensionless def __call__(self, wrapped_function): # Extract the function signature for the function we are wrapping. wrapped_signature = inspect.signature(wrapped_function) # Define a new function to return in place of the wrapped one @wraps(wrapped_function) def wrapper(*func_args, **func_kwargs): # Bind the arguments to our new function to the signature of the original. bound_args = wrapped_signature.bind(*func_args, **func_kwargs) # Iterate through the parameters of the original signature for param in wrapped_signature.parameters.values(): # We do not support variable arguments (*args, **kwargs) if param.kind in (inspect.Parameter.VAR_KEYWORD, inspect.Parameter.VAR_POSITIONAL): continue # Catch the (never triggered) case where bind relied on a default value. 
if (param.name not in bound_args.arguments and param.default is not param.empty): bound_args.arguments[param.name] = param.default # Get the value of this parameter (argument to new function) arg = bound_args.arguments[param.name] # Get target unit or physical type, either from decorator kwargs # or annotations if param.name in self.decorator_kwargs: targets = self.decorator_kwargs[param.name] is_annotation = False else: targets = param.annotation is_annotation = True # parses to unit if it's an annotation (or list thereof) targets = _parse_annotation(targets) # If the targets is empty, then no target units or physical # types were specified so we can continue to the next arg if targets is inspect.Parameter.empty: continue # If the argument value is None, and the default value is None, # pass through the None even if there is a target unit if arg is None and param.default is None: continue # Here, we check whether multiple target unit/physical type's # were specified in the decorator/annotation, or whether a # single string (unit or physical type) or a Unit object was # specified if (isinstance(targets, str) or not isinstance(targets, Sequence)): valid_targets = [targets] # Check for None in the supplied list of allowed units and, if # present and the passed value is also None, ignore. elif None in targets or NoneType in targets: if arg is None: continue else: valid_targets = [t for t in targets if t is not None] else: valid_targets = targets # If we're dealing with an annotation, skip all the targets that # are not strings or subclasses of Unit. This is to allow # non unit related annotations to pass through if is_annotation: valid_targets = [t for t in valid_targets if isinstance(t, (str, UnitBase, PhysicalType))] # Now we loop over the allowed units/physical types and validate # the value of the argument: _validate_arg_value(param.name, wrapped_function.__name__, arg, valid_targets, self.equivalencies, self.strict_dimensionless) # Call the original function with any equivalencies in force. with add_enabled_equivalencies(self.equivalencies): return_ = wrapped_function(*func_args, **func_kwargs) # Return ra = wrapped_signature.return_annotation valid_empty = (inspect.Signature.empty, None, NoneType, T.NoReturn) if ra not in valid_empty: target = (ra if T.get_origin(ra) not in (T.Annotated, T.Union) else _parse_annotation(ra)) if isinstance(target, str) or not isinstance(target, Sequence): target = [target] valid_targets = [t for t in target if isinstance(t, (str, UnitBase, PhysicalType))] _validate_arg_value("return", wrapped_function.__name__, return_, valid_targets, self.equivalencies, self.strict_dimensionless) if len(valid_targets) > 0: return_ <<= valid_targets[0] return return_ return wrapper quantity_input = QuantityInput.as_decorator
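# Usage sketch (illustrative; not part of the module above). The decorator
# validating argument units from annotations; the function and values are
# hypothetical, but the behavior follows the ``QuantityInput.as_decorator``
# docstring.
import astropy.units as u

@u.quantity_input
def kinetic_energy(mass: u.kg, velocity: u.m / u.s) -> u.J:
    # The return annotation converts the result to Joule (``return_ <<= u.J``).
    return 0.5 * mass * velocity ** 2

print(kinetic_energy(2 * u.kg, 3 * u.m / u.s))  # 9.0 J
# kinetic_energy(2 * u.s, 3 * u.m / u.s) would raise UnitsError.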
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ This package defines colloquially used Imperial units. They are available in the `astropy.units.imperial` namespace, but not in the top-level `astropy.units` namespace, e.g.:: >>> import astropy.units as u >>> mph = u.imperial.mile / u.hour >>> mph Unit("mi / h") To include them in `~astropy.units.UnitBase.compose` and the results of `~astropy.units.UnitBase.find_equivalent_units`, do:: >>> import astropy.units as u >>> u.imperial.enable() # doctest: +SKIP """ from . import si from .core import UnitBase, def_unit _ns = globals() ########################################################################### # LENGTH def_unit(['inch'], 2.54 * si.cm, namespace=_ns, doc="International inch") def_unit(['ft', 'foot'], 12 * inch, namespace=_ns, doc="International foot") def_unit(['yd', 'yard'], 3 * ft, namespace=_ns, doc="International yard") def_unit(['mi', 'mile'], 5280 * ft, namespace=_ns, doc="International mile") def_unit(['mil', 'thou'], 0.001 * inch, namespace=_ns, doc="Thousandth of an inch") def_unit(['nmi', 'nauticalmile', 'NM'], 1852 * si.m, namespace=_ns, doc="Nautical mile") def_unit(['fur', 'furlong'], 660 * ft, namespace=_ns, doc="Furlong") ########################################################################### # AREAS def_unit(['ac', 'acre'], 43560 * ft ** 2, namespace=_ns, doc="International acre") ########################################################################### # VOLUMES def_unit(['gallon'], si.liter / 0.264172052, namespace=_ns, doc="U.S. liquid gallon") def_unit(['quart'], gallon / 4, namespace=_ns, doc="U.S. liquid quart") def_unit(['pint'], quart / 2, namespace=_ns, doc="U.S. liquid pint") def_unit(['cup'], pint / 2, namespace=_ns, doc="U.S. customary cup") def_unit(['foz', 'fluid_oz', 'fluid_ounce'], cup / 8, namespace=_ns, doc="U.S. fluid ounce") def_unit(['tbsp', 'tablespoon'], foz / 2, namespace=_ns, doc="U.S. customary tablespoon") def_unit(['tsp', 'teaspoon'], tbsp / 3, namespace=_ns, doc="U.S. 
customary teaspoon") ########################################################################### # MASS def_unit(['oz', 'ounce'], 28.349523125 * si.g, namespace=_ns, doc="International avoirdupois ounce: mass") def_unit(['lb', 'lbm', 'pound'], 16 * oz, namespace=_ns, doc="International avoirdupois pound: mass") def_unit(['st', 'stone'], 14 * lb, namespace=_ns, doc="International avoirdupois stone: mass") def_unit(['ton'], 2000 * lb, namespace=_ns, doc="International avoirdupois ton: mass") def_unit(['slug'], 32.174049 * lb, namespace=_ns, doc="slug: mass") ########################################################################### # SPEED def_unit(['kn', 'kt', 'knot', 'NMPH'], nmi / si.h, namespace=_ns, doc="nautical unit of speed: 1 nmi per hour") ########################################################################### # FORCE def_unit('lbf', slug * ft * si.s**-2, namespace=_ns, doc="Pound: force") def_unit(['kip', 'kilopound'], 1000 * lbf, namespace=_ns, doc="Kilopound: force") ########################################################################## # ENERGY def_unit(['BTU', 'btu'], 1.05505585 * si.kJ, namespace=_ns, doc="British thermal unit") def_unit(['cal', 'calorie'], 4.184 * si.J, namespace=_ns, doc="Thermochemical calorie: pre-SI metric unit of energy") def_unit(['kcal', 'Cal', 'Calorie', 'kilocal', 'kilocalorie'], 1000 * cal, namespace=_ns, doc="Calorie: colloquial definition of Calorie") ########################################################################## # PRESSURE def_unit('psi', lbf * inch ** -2, namespace=_ns, doc="Pound per square inch: pressure") ########################################################################### # POWER # Imperial units def_unit(['hp', 'horsepower'], si.W / 0.00134102209, namespace=_ns, doc="Electrical horsepower") ########################################################################### # TEMPERATURE def_unit(['deg_F', 'Fahrenheit'], namespace=_ns, doc='Degrees Fahrenheit', format={'latex': r'{}^{\circ}F', 'unicode': '°F'}) def_unit(['deg_R', 'Rankine'], namespace=_ns, doc='Rankine scale: absolute scale of thermodynamic temperature') ########################################################################### # CLEANUP del UnitBase del def_unit ########################################################################### # DOCSTRING # This generates a docstring for this module that describes all of the # standard units defined here. from .utils import generate_unit_summary as _generate_unit_summary if __doc__ is not None: __doc__ += _generate_unit_summary(globals()) def enable(): """ Enable Imperial units so they appear in results of `~astropy.units.UnitBase.find_equivalent_units` and `~astropy.units.UnitBase.compose`. This may be used with the ``with`` statement to enable Imperial units only temporarily. """ # Local import to avoid cyclical import # Local import to avoid polluting namespace import inspect from .core import add_enabled_units return add_enabled_units(inspect.getmodule(enable))
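# Usage sketch (illustrative; not part of the module above). Imperial units
# are reachable only through the ``imperial`` namespace unless ``enable()``
# is called; the number follows from the inch/foot/mile chain defined there.
import astropy.units as u

speed = 60 * u.imperial.mile / u.hour
print(speed.to(u.m / u.s))  # ~26.82 m / s (1 mi = 5280 ft = 1609.344 m)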
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ This package defines units used in the CDS format, both the units defined in `Centre de Données astronomiques de Strasbourg <http://cds.u-strasbg.fr/>`_ `Standards for Astronomical Catalogues 2.0 <http://vizier.u-strasbg.fr/vizier/doc/catstd-3.2.htx>`_ format and the `complete set of supported units <https://vizier.u-strasbg.fr/viz-bin/Unit>`_. This format is used by VOTable up to version 1.2. These units are not available in the top-level `astropy.units` namespace. To use these units, you must import the `astropy.units.cds` module:: >>> from astropy.units import cds >>> q = 10. * cds.lyr # doctest: +SKIP To include them in `~astropy.units.UnitBase.compose` and the results of `~astropy.units.UnitBase.find_equivalent_units`, do:: >>> from astropy.units import cds >>> cds.enable() # doctest: +SKIP """ _ns = globals() def _initialize_module(): """Initialize CDS units module.""" # Local imports to avoid polluting top-level namespace import numpy as np from astropy import units as u from astropy.constants import si as _si from . import core # The CDS format also supports power-of-2 prefixes as defined here: # http://physics.nist.gov/cuu/Units/binary.html prefixes = core.si_prefixes + core.binary_prefixes # CDS only uses the short prefixes prefixes = [(short, short, factor) for (short, long, factor) in prefixes] # The following units are defined in alphabetical order, directly from # here: https://vizier.u-strasbg.fr/viz-bin/Unit mapping = [ (['A'], u.A, "Ampere"), (['a'], u.a, "year", ['P']), (['a0'], _si.a0, "Bohr radius"), (['al'], u.lyr, "Light year", ['c', 'd']), (['lyr'], u.lyr, "Light year"), (['alpha'], _si.alpha, "Fine structure constant"), ((['AA', 'Å'], ['Angstrom', 'Angstroem']), u.AA, "Angstrom"), (['arcmin', 'arcm'], u.arcminute, "minute of arc"), (['arcsec', 'arcs'], u.arcsecond, "second of arc"), (['atm'], _si.atm, "atmosphere"), (['AU', 'au'], u.au, "astronomical unit"), (['bar'], u.bar, "bar"), (['barn'], u.barn, "barn"), (['bit'], u.bit, "bit"), (['byte'], u.byte, "byte"), (['C'], u.C, "Coulomb"), (['c'], _si.c, "speed of light", ['p']), (['cal'], 4.1854 * u.J, "calorie"), (['cd'], u.cd, "candela"), (['ct'], u.ct, "count"), (['D'], u.D, "Debye (dipole)"), (['d'], u.d, "Julian day", ['c']), ((['deg', '°'], ['degree']), u.degree, "degree"), (['dyn'], u.dyn, "dyne"), (['e'], _si.e, "electron charge", ['m']), (['eps0'], _si.eps0, "electric constant"), (['erg'], u.erg, "erg"), (['eV'], u.eV, "electron volt"), (['F'], u.F, "Farad"), (['G'], _si.G, "Gravitation constant"), (['g'], u.g, "gram"), (['gauss'], u.G, "Gauss"), (['geoMass', 'Mgeo'], u.M_earth, "Earth mass"), (['H'], u.H, "Henry"), (['h'], u.h, "hour", ['p']), (['hr'], u.h, "hour"), (['\\h'], _si.h, "Planck constant"), (['Hz'], u.Hz, "Hertz"), (['inch'], 0.0254 * u.m, "inch"), (['J'], u.J, "Joule"), (['JD'], u.d, "Julian day", ['M']), (['jovMass', 'Mjup'], u.M_jup, "Jupiter mass"), (['Jy'], u.Jy, "Jansky"), (['K'], u.K, "Kelvin"), (['k'], _si.k_B, "Boltzmann"), (['l'], u.l, "litre", ['a']), (['lm'], u.lm, "lumen"), (['Lsun', 'solLum'], u.solLum, "solar luminosity"), (['lx'], u.lx, "lux"), (['m'], u.m, "meter"), (['mag'], u.mag, "magnitude"), (['me'], _si.m_e, "electron mass"), (['min'], u.minute, "minute"), (['MJD'], u.d, "Julian day"), (['mmHg'], 133.322387415 * u.Pa, "millimeter of mercury"), (['mol'], u.mol, "mole"), (['mp'], _si.m_p, "proton mass"), (['Msun', 'solMass'], u.solMass, "solar mass"), ((['mu0', 'µ0'], []), _si.mu0, 
"magnetic constant"), (['muB'], _si.muB, "Bohr magneton"), (['N'], u.N, "Newton"), (['Ohm'], u.Ohm, "Ohm"), (['Pa'], u.Pa, "Pascal"), (['pc'], u.pc, "parsec"), (['ph'], u.ph, "photon"), (['pi'], u.Unit(np.pi), "π"), (['pix'], u.pix, "pixel"), (['ppm'], u.Unit(1e-6), "parts per million"), (['R'], _si.R, "gas constant"), (['rad'], u.radian, "radian"), (['Rgeo'], _si.R_earth, "Earth equatorial radius"), (['Rjup'], _si.R_jup, "Jupiter equatorial radius"), (['Rsun', 'solRad'], u.solRad, "solar radius"), (['Ry'], u.Ry, "Rydberg"), (['S'], u.S, "Siemens"), (['s', 'sec'], u.s, "second"), (['sr'], u.sr, "steradian"), (['Sun'], u.Sun, "solar unit"), (['T'], u.T, "Tesla"), (['t'], 1e3 * u.kg, "metric tonne", ['c']), (['u'], _si.u, "atomic mass", ['da', 'a']), (['V'], u.V, "Volt"), (['W'], u.W, "Watt"), (['Wb'], u.Wb, "Weber"), (['yr'], u.a, "year"), ] for entry in mapping: if len(entry) == 3: names, unit, doc = entry excludes = [] else: names, unit, doc, excludes = entry core.def_unit(names, unit, prefixes=prefixes, namespace=_ns, doc=doc, exclude_prefixes=excludes) core.def_unit(['µas'], u.microarcsecond, doc="microsecond of arc", namespace=_ns) core.def_unit(['mas'], u.milliarcsecond, doc="millisecond of arc", namespace=_ns) core.def_unit(['---', '-'], u.dimensionless_unscaled, doc="dimensionless and unscaled", namespace=_ns) core.def_unit(['%'], u.percent, doc="percent", namespace=_ns) # The Vizier "standard" defines this in units of "kg s-3", but # that may not make a whole lot of sense, so here we just define # it as its own new disconnected unit. core.def_unit(['Crab'], prefixes=prefixes, namespace=_ns, doc="Crab (X-ray) flux") _initialize_module() ########################################################################### # DOCSTRING # This generates a docstring for this module that describes all of the # standard units defined here. from .utils import generate_unit_summary as _generate_unit_summary if __doc__ is not None: __doc__ += _generate_unit_summary(globals()) def enable(): """ Enable CDS units so they appear in results of `~astropy.units.UnitBase.find_equivalent_units` and `~astropy.units.UnitBase.compose`. This will disable all of the "default" `astropy.units` units, since there are some namespace clashes between the two. This may be used with the ``with`` statement to enable CDS units only temporarily. """ # Local imports to avoid cyclical import and polluting namespace import inspect from .core import set_enabled_units return set_enabled_units(inspect.getmodule(enable))
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines magnitude zero points and related photometric quantities.

The corresponding magnitudes are given in the description of each unit
(the actual definitions are in `~astropy.units.function.logarithmic`).
"""

import numpy as _numpy

from astropy.constants import si as _si

from . import astrophys, cgs, si
from .core import Unit, UnitBase, def_unit

_ns = globals()

def_unit(['Bol', 'L_bol'], _si.L_bol0, namespace=_ns, prefixes=False,
         doc="Luminosity corresponding to absolute bolometric magnitude zero "
         "(magnitude ``M_bol``).")
def_unit(['bol', 'f_bol'],
         _si.L_bol0 / (4 * _numpy.pi * (10.*astrophys.pc)**2),
         namespace=_ns, prefixes=False,
         doc="Irradiance corresponding to "
         "apparent bolometric magnitude zero (magnitude ``m_bol``).")
def_unit(['AB', 'ABflux'],
         10.**(48.6/-2.5) * cgs.erg * cgs.cm**-2 / si.s / si.Hz,
         namespace=_ns, prefixes=False,
         doc="AB magnitude zero flux density (magnitude ``ABmag``).")
def_unit(['ST', 'STflux'],
         10.**(21.1/-2.5) * cgs.erg * cgs.cm**-2 / si.s / si.AA,
         namespace=_ns, prefixes=False,
         doc="ST magnitude zero flux density (magnitude ``STmag``).")
def_unit(['mgy', 'maggy'],
         namespace=_ns, prefixes=[(['n'], ['nano'], 1e-9)],
         doc="Maggies - a linear flux unit that is the flux for a mag=0 "
         "object. To tie this onto a specific calibrated unit system, the "
         "zero_point_flux equivalency should be used.")


def zero_point_flux(flux0):
    """
    An equivalency for converting linear flux units ("maggies") defined
    relative to a standard source into a standardized system.

    Parameters
    ----------
    flux0 : `~astropy.units.Quantity`
        The flux of a magnitude-0 object in the "maggy" system.
    """
    flux_unit0 = Unit(flux0)
    return [(maggy, flux_unit0)]


###########################################################################
# CLEANUP

del UnitBase
del def_unit
del cgs, si, astrophys

###########################################################################
# DOCSTRING

# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
    __doc__ += _generate_unit_summary(globals())
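# Minimal usage sketch, assuming astropy is installed: tie the linear
# "maggy" unit defined above to a calibrated flux system through the
# ``zero_point_flux`` equivalency (3631 Jy is the conventional AB
# zero-point flux density).
from astropy import units as u

flux = 0.1 * u.mgy                                   # a tenth of a maggy
zero_point = 3631 * u.Jy                             # flux of a mag=0 source
print(flux.to(u.Jy, u.zero_point_flux(zero_point)))  # ~363.1 Jy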
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Core units classes and functions """ import inspect import operator import textwrap import warnings import numpy as np from astropy.utils.decorators import lazyproperty from astropy.utils.exceptions import AstropyWarning from astropy.utils.misc import isiterable from . import format as unit_format from .utils import is_effectively_unity, resolve_fractions, sanitize_scale, validate_power __all__ = [ 'UnitsError', 'UnitsWarning', 'UnitConversionError', 'UnitTypeError', 'UnitBase', 'NamedUnit', 'IrreducibleUnit', 'Unit', 'CompositeUnit', 'PrefixUnit', 'UnrecognizedUnit', 'def_unit', 'get_current_unit_registry', 'set_enabled_units', 'add_enabled_units', 'set_enabled_equivalencies', 'add_enabled_equivalencies', 'set_enabled_aliases', 'add_enabled_aliases', 'dimensionless_unscaled', 'one', ] UNITY = 1.0 def _flatten_units_collection(items): """ Given a list of sequences, modules or dictionaries of units, or single units, return a flat set of all the units found. """ if not isinstance(items, list): items = [items] result = set() for item in items: if isinstance(item, UnitBase): result.add(item) else: if isinstance(item, dict): units = item.values() elif inspect.ismodule(item): units = vars(item).values() elif isiterable(item): units = item else: continue for unit in units: if isinstance(unit, UnitBase): result.add(unit) return result def _normalize_equivalencies(equivalencies): """ Normalizes equivalencies, ensuring each is a 4-tuple of the form:: (from_unit, to_unit, forward_func, backward_func) Parameters ---------- equivalencies : list of equivalency pairs Raises ------ ValueError if an equivalency cannot be interpreted """ if equivalencies is None: return [] normalized = [] for i, equiv in enumerate(equivalencies): if len(equiv) == 2: funit, tunit = equiv a = b = lambda x: x elif len(equiv) == 3: funit, tunit, a = equiv b = a elif len(equiv) == 4: funit, tunit, a, b = equiv else: raise ValueError( f"Invalid equivalence entry {i}: {equiv!r}") if not (funit is Unit(funit) and (tunit is None or tunit is Unit(tunit)) and callable(a) and callable(b)): raise ValueError( f"Invalid equivalence entry {i}: {equiv!r}") normalized.append((funit, tunit, a, b)) return normalized class _UnitRegistry: """ Manages a registry of the enabled units. """ def __init__(self, init=[], equivalencies=[], aliases={}): if isinstance(init, _UnitRegistry): # If passed another registry we don't need to rebuild everything. # but because these are mutable types we don't want to create # conflicts so everything needs to be copied. self._equivalencies = init._equivalencies.copy() self._aliases = init._aliases.copy() self._all_units = init._all_units.copy() self._registry = init._registry.copy() self._non_prefix_units = init._non_prefix_units.copy() # The physical type is a dictionary containing sets as values. # All of these must be copied otherwise we could alter the old # registry. 
self._by_physical_type = {k: v.copy() for k, v in init._by_physical_type.items()} else: self._reset_units() self._reset_equivalencies() self._reset_aliases() self.add_enabled_units(init) self.add_enabled_equivalencies(equivalencies) self.add_enabled_aliases(aliases) def _reset_units(self): self._all_units = set() self._non_prefix_units = set() self._registry = {} self._by_physical_type = {} def _reset_equivalencies(self): self._equivalencies = set() def _reset_aliases(self): self._aliases = {} @property def registry(self): return self._registry @property def all_units(self): return self._all_units @property def non_prefix_units(self): return self._non_prefix_units def set_enabled_units(self, units): """ Sets the units enabled in the unit registry. These units are searched when using `UnitBase.find_equivalent_units`, for example. Parameters ---------- units : list of sequence, dict, or module This is a list of things in which units may be found (sequences, dicts or modules), or units themselves. The entire set will be "enabled" for searching through by methods like `UnitBase.find_equivalent_units` and `UnitBase.compose`. """ self._reset_units() return self.add_enabled_units(units) def add_enabled_units(self, units): """ Adds to the set of units enabled in the unit registry. These units are searched when using `UnitBase.find_equivalent_units`, for example. Parameters ---------- units : list of sequence, dict, or module This is a list of things in which units may be found (sequences, dicts or modules), or units themselves. The entire set will be added to the "enabled" set for searching through by methods like `UnitBase.find_equivalent_units` and `UnitBase.compose`. """ units = _flatten_units_collection(units) for unit in units: # Loop through all of the names first, to ensure all of them # are new, then add them all as a single "transaction" below. for st in unit._names: if (st in self._registry and unit != self._registry[st]): raise ValueError( "Object with name {!r} already exists in namespace. " "Filter the set of units to avoid name clashes before " "enabling them.".format(st)) for st in unit._names: self._registry[st] = unit self._all_units.add(unit) if not isinstance(unit, PrefixUnit): self._non_prefix_units.add(unit) hash = unit._get_physical_type_id() self._by_physical_type.setdefault(hash, set()).add(unit) def get_units_with_physical_type(self, unit): """ Get all units in the registry with the same physical type as the given unit. Parameters ---------- unit : UnitBase instance """ return self._by_physical_type.get(unit._get_physical_type_id(), set()) @property def equivalencies(self): return list(self._equivalencies) def set_enabled_equivalencies(self, equivalencies): """ Sets the equivalencies enabled in the unit registry. These equivalencies are used if no explicit equivalencies are given, both in unit conversion and in finding equivalent units. This is meant in particular for allowing angles to be dimensionless. Use with care. Parameters ---------- equivalencies : list of tuple List of equivalent pairs, e.g., as returned by `~astropy.units.equivalencies.dimensionless_angles`. """ self._reset_equivalencies() return self.add_enabled_equivalencies(equivalencies) def add_enabled_equivalencies(self, equivalencies): """ Adds to the set of equivalencies enabled in the unit registry. These equivalencies are used if no explicit equivalencies are given, both in unit conversion and in finding equivalent units. This is meant in particular for allowing angles to be dimensionless. 
Use with care. Parameters ---------- equivalencies : list of tuple List of equivalent pairs, e.g., as returned by `~astropy.units.equivalencies.dimensionless_angles`. """ # pre-normalize list to help catch mistakes equivalencies = _normalize_equivalencies(equivalencies) self._equivalencies |= set(equivalencies) @property def aliases(self): return self._aliases def set_enabled_aliases(self, aliases): """ Set aliases for units. Parameters ---------- aliases : dict of str, Unit The aliases to set. The keys must be the string aliases, and values must be the `astropy.units.Unit` that the alias will be mapped to. Raises ------ ValueError If the alias already defines a different unit. """ self._reset_aliases() self.add_enabled_aliases(aliases) def add_enabled_aliases(self, aliases): """ Add aliases for units. Parameters ---------- aliases : dict of str, Unit The aliases to add. The keys must be the string aliases, and values must be the `astropy.units.Unit` that the alias will be mapped to. Raises ------ ValueError If the alias already defines a different unit. """ for alias, unit in aliases.items(): if alias in self._registry and unit != self._registry[alias]: raise ValueError( f"{alias} already means {self._registry[alias]}, so " f"cannot be used as an alias for {unit}.") if alias in self._aliases and unit != self._aliases[alias]: raise ValueError( f"{alias} already is an alias for {self._aliases[alias]}, so " f"cannot be used as an alias for {unit}.") for alias, unit in aliases.items(): if alias not in self._registry and alias not in self._aliases: self._aliases[alias] = unit class _UnitContext: def __init__(self, init=[], equivalencies=[]): _unit_registries.append( _UnitRegistry(init=init, equivalencies=equivalencies)) def __enter__(self): pass def __exit__(self, type, value, tb): _unit_registries.pop() _unit_registries = [_UnitRegistry()] def get_current_unit_registry(): return _unit_registries[-1] def set_enabled_units(units): """ Sets the units enabled in the unit registry. These units are searched when using `UnitBase.find_equivalent_units`, for example. This may be used either permanently, or as a context manager using the ``with`` statement (see example below). Parameters ---------- units : list of sequence, dict, or module This is a list of things in which units may be found (sequences, dicts or modules), or units themselves. The entire set will be "enabled" for searching through by methods like `UnitBase.find_equivalent_units` and `UnitBase.compose`. Examples -------- >>> from astropy import units as u >>> with u.set_enabled_units([u.pc]): ... u.m.find_equivalent_units() ... 
Primary name | Unit definition | Aliases [ pc | 3.08568e+16 m | parsec , ] >>> u.m.find_equivalent_units() Primary name | Unit definition | Aliases [ AU | 1.49598e+11 m | au, astronomical_unit , Angstrom | 1e-10 m | AA, angstrom , cm | 0.01 m | centimeter , earthRad | 6.3781e+06 m | R_earth, Rearth , jupiterRad | 7.1492e+07 m | R_jup, Rjup, R_jupiter, Rjupiter , lsec | 2.99792e+08 m | lightsecond , lyr | 9.46073e+15 m | lightyear , m | irreducible | meter , micron | 1e-06 m | , pc | 3.08568e+16 m | parsec , solRad | 6.957e+08 m | R_sun, Rsun , ] """ # get a context with a new registry, using equivalencies of the current one context = _UnitContext( equivalencies=get_current_unit_registry().equivalencies) # in this new current registry, enable the units requested get_current_unit_registry().set_enabled_units(units) return context def add_enabled_units(units): """ Adds to the set of units enabled in the unit registry. These units are searched when using `UnitBase.find_equivalent_units`, for example. This may be used either permanently, or as a context manager using the ``with`` statement (see example below). Parameters ---------- units : list of sequence, dict, or module This is a list of things in which units may be found (sequences, dicts or modules), or units themselves. The entire set will be added to the "enabled" set for searching through by methods like `UnitBase.find_equivalent_units` and `UnitBase.compose`. Examples -------- >>> from astropy import units as u >>> from astropy.units import imperial >>> with u.add_enabled_units(imperial): ... u.m.find_equivalent_units() ... Primary name | Unit definition | Aliases [ AU | 1.49598e+11 m | au, astronomical_unit , Angstrom | 1e-10 m | AA, angstrom , cm | 0.01 m | centimeter , earthRad | 6.3781e+06 m | R_earth, Rearth , ft | 0.3048 m | foot , fur | 201.168 m | furlong , inch | 0.0254 m | , jupiterRad | 7.1492e+07 m | R_jup, Rjup, R_jupiter, Rjupiter , lsec | 2.99792e+08 m | lightsecond , lyr | 9.46073e+15 m | lightyear , m | irreducible | meter , mi | 1609.34 m | mile , micron | 1e-06 m | , mil | 2.54e-05 m | thou , nmi | 1852 m | nauticalmile, NM , pc | 3.08568e+16 m | parsec , solRad | 6.957e+08 m | R_sun, Rsun , yd | 0.9144 m | yard , ] """ # get a context with a new registry, which is a copy of the current one context = _UnitContext(get_current_unit_registry()) # in this new current registry, enable the further units requested get_current_unit_registry().add_enabled_units(units) return context def set_enabled_equivalencies(equivalencies): """ Sets the equivalencies enabled in the unit registry. These equivalencies are used if no explicit equivalencies are given, both in unit conversion and in finding equivalent units. This is meant in particular for allowing angles to be dimensionless. Use with care. Parameters ---------- equivalencies : list of tuple list of equivalent pairs, e.g., as returned by `~astropy.units.equivalencies.dimensionless_angles`. Examples -------- Exponentiation normally requires dimensionless quantities. To avoid problems with complex phases:: >>> from astropy import units as u >>> with u.set_enabled_equivalencies(u.dimensionless_angles()): ... phase = 0.5 * u.cycle ... 
np.exp(1j*phase) # doctest: +FLOAT_CMP <Quantity -1.+1.2246468e-16j> """ # get a context with a new registry, using all units of the current one context = _UnitContext(get_current_unit_registry()) # in this new current registry, enable the equivalencies requested get_current_unit_registry().set_enabled_equivalencies(equivalencies) return context def add_enabled_equivalencies(equivalencies): """ Adds to the equivalencies enabled in the unit registry. These equivalencies are used if no explicit equivalencies are given, both in unit conversion and in finding equivalent units. This is meant in particular for allowing angles to be dimensionless. Since no equivalencies are enabled by default, generally it is recommended to use `set_enabled_equivalencies`. Parameters ---------- equivalencies : list of tuple list of equivalent pairs, e.g., as returned by `~astropy.units.equivalencies.dimensionless_angles`. """ # get a context with a new registry, which is a copy of the current one context = _UnitContext(get_current_unit_registry()) # in this new current registry, enable the further equivalencies requested get_current_unit_registry().add_enabled_equivalencies(equivalencies) return context def set_enabled_aliases(aliases): """ Set aliases for units. This is useful for handling alternate spellings for units, or misspelled units in files one is trying to read. Parameters ---------- aliases : dict of str, Unit The aliases to set. The keys must be the string aliases, and values must be the `astropy.units.Unit` that the alias will be mapped to. Raises ------ ValueError If the alias already defines a different unit. Examples -------- To temporarily allow for a misspelled 'Angstroem' unit:: >>> from astropy import units as u >>> with u.set_enabled_aliases({'Angstroem': u.Angstrom}): ... print(u.Unit("Angstroem", parse_strict="raise") == u.Angstrom) True """ # get a context with a new registry, which is a copy of the current one context = _UnitContext(get_current_unit_registry()) # in this new current registry, enable the further equivalencies requested get_current_unit_registry().set_enabled_aliases(aliases) return context def add_enabled_aliases(aliases): """ Add aliases for units. This is useful for handling alternate spellings for units, or misspelled units in files one is trying to read. Since no aliases are enabled by default, generally it is recommended to use `set_enabled_aliases`. Parameters ---------- aliases : dict of str, Unit The aliases to add. The keys must be the string aliases, and values must be the `astropy.units.Unit` that the alias will be mapped to. Raises ------ ValueError If the alias already defines a different unit. Examples -------- To temporarily allow for a misspelled 'Angstroem' unit:: >>> from astropy import units as u >>> with u.add_enabled_aliases({'Angstroem': u.Angstrom}): ... print(u.Unit("Angstroem", parse_strict="raise") == u.Angstrom) True """ # get a context with a new registry, which is a copy of the current one context = _UnitContext(get_current_unit_registry()) # in this new current registry, enable the further equivalencies requested get_current_unit_registry().add_enabled_aliases(aliases) return context class UnitsError(Exception): """ The base class for unit-specific exceptions. """ class UnitScaleError(UnitsError, ValueError): """ Used to catch the errors involving scaled units, which are not recognized by FITS format. 
""" pass class UnitConversionError(UnitsError, ValueError): """ Used specifically for errors related to converting between units or interpreting units in terms of other units. """ class UnitTypeError(UnitsError, TypeError): """ Used specifically for errors in setting to units not allowed by a class. E.g., would be raised if the unit of an `~astropy.coordinates.Angle` instances were set to a non-angular unit. """ class UnitsWarning(AstropyWarning): """ The base class for unit-specific warnings. """ class UnitBase: """ Abstract base class for units. Most of the arithmetic operations on units are defined in this base class. Should not be instantiated by users directly. """ # Make sure that __rmul__ of units gets called over the __mul__ of Numpy # arrays to avoid element-wise multiplication. __array_priority__ = 1000 _hash = None def __deepcopy__(self, memo): # This may look odd, but the units conversion will be very # broken after deep-copying if we don't guarantee that a given # physical unit corresponds to only one instance return self def _repr_latex_(self): """ Generate latex representation of unit name. This is used by the IPython notebook to print a unit with a nice layout. Returns ------- Latex string """ return unit_format.Latex.to_string(self) def __bytes__(self): """Return string representation for unit""" return unit_format.Generic.to_string(self).encode('unicode_escape') def __str__(self): """Return string representation for unit""" return unit_format.Generic.to_string(self) def __repr__(self): string = unit_format.Generic.to_string(self) return f'Unit("{string}")' def _get_physical_type_id(self): """ Returns an identifier that uniquely identifies the physical type of this unit. It is comprised of the bases and powers of this unit, without the scale. Since it is hashable, it is useful as a dictionary key. """ unit = self.decompose() r = zip([x.name for x in unit.bases], unit.powers) # bases and powers are already sorted in a unique way # r.sort() r = tuple(r) return r @property def names(self): """ Returns all of the names associated with this unit. """ raise AttributeError( "Can not get names from unnamed units. " "Perhaps you meant to_string()?") @property def name(self): """ Returns the canonical (short) name associated with this unit. """ raise AttributeError( "Can not get names from unnamed units. " "Perhaps you meant to_string()?") @property def aliases(self): """ Returns the alias (long) names for this unit. """ raise AttributeError( "Can not get aliases from unnamed units. " "Perhaps you meant to_string()?") @property def scale(self): """ Return the scale of the unit. """ return 1.0 @property def bases(self): """ Return the bases of the unit. """ return [self] @property def powers(self): """ Return the powers of the unit. """ return [1] def to_string(self, format=unit_format.Generic): """ Output the unit in the given format as a string. Parameters ---------- format : `astropy.units.format.Base` instance or str The name of a format or a formatter object. If not provided, defaults to the generic format. 
""" f = unit_format.get_format(format) return f.to_string(self) def __format__(self, format_spec): """Try to format units using a formatter.""" try: return self.to_string(format=format_spec) except ValueError: return format(str(self), format_spec) @staticmethod def _normalize_equivalencies(equivalencies): """ Normalizes equivalencies, ensuring each is a 4-tuple of the form:: (from_unit, to_unit, forward_func, backward_func) Parameters ---------- equivalencies : list of equivalency pairs, or None Returns ------- A normalized list, including possible global defaults set by, e.g., `set_enabled_equivalencies`, except when `equivalencies`=`None`, in which case the returned list is always empty. Raises ------ ValueError if an equivalency cannot be interpreted """ normalized = _normalize_equivalencies(equivalencies) if equivalencies is not None: normalized += get_current_unit_registry().equivalencies return normalized def __pow__(self, p): p = validate_power(p) return CompositeUnit(1, [self], [p], _error_check=False) def __truediv__(self, m): if isinstance(m, (bytes, str)): m = Unit(m) if isinstance(m, UnitBase): if m.is_unity(): return self return CompositeUnit(1, [self, m], [1, -1], _error_check=False) try: # Cannot handle this as Unit, re-try as Quantity from .quantity import Quantity return Quantity(1, self) / m except TypeError: return NotImplemented def __rtruediv__(self, m): if isinstance(m, (bytes, str)): return Unit(m) / self try: # Cannot handle this as Unit. Here, m cannot be a Quantity, # so we make it into one, fasttracking when it does not have a # unit, for the common case of <array> / <unit>. from .quantity import Quantity if hasattr(m, 'unit'): result = Quantity(m) result /= self return result else: return Quantity(m, self**(-1)) except TypeError: return NotImplemented def __mul__(self, m): if isinstance(m, (bytes, str)): m = Unit(m) if isinstance(m, UnitBase): if m.is_unity(): return self elif self.is_unity(): return m return CompositeUnit(1, [self, m], [1, 1], _error_check=False) # Cannot handle this as Unit, re-try as Quantity. try: from .quantity import Quantity return Quantity(1, self) * m except TypeError: return NotImplemented def __rmul__(self, m): if isinstance(m, (bytes, str)): return Unit(m) * self # Cannot handle this as Unit. Here, m cannot be a Quantity, # so we make it into one, fasttracking when it does not have a unit # for the common case of <array> * <unit>. try: from .quantity import Quantity if hasattr(m, 'unit'): result = Quantity(m) result *= self return result else: return Quantity(m, self) except TypeError: return NotImplemented def __rlshift__(self, m): try: from .quantity import Quantity return Quantity(m, self, copy=False, subok=True) except Exception: return NotImplemented def __rrshift__(self, m): warnings.warn(">> is not implemented. Did you mean to convert " "to a Quantity with unit {} using '<<'?".format(self), AstropyWarning) return NotImplemented def __hash__(self): if self._hash is None: parts = ([str(self.scale)] + [x.name for x in self.bases] + [str(x) for x in self.powers]) self._hash = hash(tuple(parts)) return self._hash def __getstate__(self): # If we get pickled, we should *not* store the memoized hash since # hashes of strings vary between sessions. 
state = self.__dict__.copy() state.pop('_hash', None) return state def __eq__(self, other): if self is other: return True try: other = Unit(other, parse_strict='silent') except (ValueError, UnitsError, TypeError): return NotImplemented # Other is unit-like, but the test below requires it is a UnitBase # instance; if it is not, give up (so that other can try). if not isinstance(other, UnitBase): return NotImplemented try: return is_effectively_unity(self._to(other)) except UnitsError: return False def __ne__(self, other): return not (self == other) def __le__(self, other): scale = self._to(Unit(other)) return scale <= 1. or is_effectively_unity(scale) def __ge__(self, other): scale = self._to(Unit(other)) return scale >= 1. or is_effectively_unity(scale) def __lt__(self, other): return not (self >= other) def __gt__(self, other): return not (self <= other) def __neg__(self): return self * -1. def is_equivalent(self, other, equivalencies=[]): """ Returns `True` if this unit is equivalent to ``other``. Parameters ---------- other : `~astropy.units.Unit`, str, or tuple The unit to convert to. If a tuple of units is specified, this method returns true if the unit matches any of those in the tuple. equivalencies : list of tuple A list of equivalence pairs to try if the units are not directly convertible. See :ref:`astropy:unit_equivalencies`. This list is in addition to possible global defaults set by, e.g., `set_enabled_equivalencies`. Use `None` to turn off all equivalencies. Returns ------- bool """ equivalencies = self._normalize_equivalencies(equivalencies) if isinstance(other, tuple): return any(self.is_equivalent(u, equivalencies=equivalencies) for u in other) other = Unit(other, parse_strict='silent') return self._is_equivalent(other, equivalencies) def _is_equivalent(self, other, equivalencies=[]): """Returns `True` if this unit is equivalent to `other`. See `is_equivalent`, except that a proper Unit object should be given (i.e., no string) and that the equivalency list should be normalized using `_normalize_equivalencies`. """ if isinstance(other, UnrecognizedUnit): return False if (self._get_physical_type_id() == other._get_physical_type_id()): return True elif len(equivalencies): unit = self.decompose() other = other.decompose() for a, b, forward, backward in equivalencies: if b is None: # after canceling, is what's left convertible # to dimensionless (according to the equivalency)? try: (other/unit).decompose([a]) return True except Exception: pass else: if(a._is_equivalent(unit) and b._is_equivalent(other) or b._is_equivalent(unit) and a._is_equivalent(other)): return True return False def _apply_equivalencies(self, unit, other, equivalencies): """ Internal function (used from `_get_converter`) to apply equivalence pairs. """ def make_converter(scale1, func, scale2): def convert(v): return func(_condition_arg(v) / scale1) * scale2 return convert for funit, tunit, a, b in equivalencies: if tunit is None: try: ratio_in_funit = (other.decompose() / unit.decompose()).decompose([funit]) return make_converter(ratio_in_funit.scale, a, 1.) 
except UnitsError: pass else: try: scale1 = funit._to(unit) scale2 = tunit._to(other) return make_converter(scale1, a, scale2) except UnitsError: pass try: scale1 = tunit._to(unit) scale2 = funit._to(other) return make_converter(scale1, b, scale2) except UnitsError: pass def get_err_str(unit): unit_str = unit.to_string('unscaled') physical_type = unit.physical_type if physical_type != 'unknown': unit_str = f"'{unit_str}' ({physical_type})" else: unit_str = f"'{unit_str}'" return unit_str unit_str = get_err_str(unit) other_str = get_err_str(other) raise UnitConversionError( f"{unit_str} and {other_str} are not convertible") def _get_converter(self, other, equivalencies=[]): """Get a converter for values in ``self`` to ``other``. If no conversion is necessary, returns ``unit_scale_converter`` (which is used as a check in quantity helpers). """ # First see if it is just a scaling. try: scale = self._to(other) except UnitsError: pass else: if scale == 1.: return unit_scale_converter else: return lambda val: scale * _condition_arg(val) # if that doesn't work, maybe we can do it with equivalencies? try: return self._apply_equivalencies( self, other, self._normalize_equivalencies(equivalencies)) except UnitsError as exc: # Last hope: maybe other knows how to do it? # We assume the equivalencies have the unit itself as first item. # TODO: maybe better for other to have a `_back_converter` method? if hasattr(other, 'equivalencies'): for funit, tunit, a, b in other.equivalencies: if other is funit: try: return lambda v: b(self._get_converter( tunit, equivalencies=equivalencies)(v)) except Exception: pass raise exc def _to(self, other): """ Returns the scale to the specified unit. See `to`, except that a Unit object should be given (i.e., no string), and that all defaults are used, i.e., no equivalencies and value=1. """ # There are many cases where we just want to ensure a Quantity is # of a particular unit, without checking whether it's already in # a particular unit. If we're being asked to convert from a unit # to itself, we can short-circuit all of this. if self is other: return 1.0 # Don't presume decomposition is possible; e.g., # conversion to function units is through equivalencies. if isinstance(other, UnitBase): self_decomposed = self.decompose() other_decomposed = other.decompose() # Check quickly whether equivalent. This is faster than # `is_equivalent`, because it doesn't generate the entire # physical type list of both units. In other words it "fails # fast". if(self_decomposed.powers == other_decomposed.powers and all(self_base is other_base for (self_base, other_base) in zip(self_decomposed.bases, other_decomposed.bases))): return self_decomposed.scale / other_decomposed.scale raise UnitConversionError( f"'{self!r}' is not a scaled version of '{other!r}'") def to(self, other, value=UNITY, equivalencies=[]): """ Return the converted values in the specified unit. Parameters ---------- other : unit-like The unit to convert to. value : int, float, or scalar array-like, optional Value(s) in the current unit to be converted to the specified unit. If not provided, defaults to 1.0 equivalencies : list of tuple A list of equivalence pairs to try if the units are not directly convertible. See :ref:`astropy:unit_equivalencies`. This list is in addition to possible global defaults set by, e.g., `set_enabled_equivalencies`. Use `None` to turn off all equivalencies. Returns ------- values : scalar or array Converted value(s). Input value sequences are returned as numpy arrays. 
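            For example::

                >>> from astropy import units as u
                >>> u.km.to(u.m)
                1000.0
                >>> u.km.to(u.m, [1., 2., 3.])
                array([1000., 2000., 3000.])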
Raises ------ UnitsError If units are inconsistent """ if other is self and value is UNITY: return UNITY else: return self._get_converter(Unit(other), equivalencies=equivalencies)(value) def in_units(self, other, value=1.0, equivalencies=[]): """ Alias for `to` for backward compatibility with pynbody. """ return self.to( other, value=value, equivalencies=equivalencies) def decompose(self, bases=set()): """ Return a unit object composed of only irreducible units. Parameters ---------- bases : sequence of UnitBase, optional The bases to decompose into. When not provided, decomposes down to any irreducible units. When provided, the decomposed result will only contain the given units. This will raises a `UnitsError` if it's not possible to do so. Returns ------- unit : `~astropy.units.CompositeUnit` New object containing only irreducible unit objects. """ raise NotImplementedError() def _compose(self, equivalencies=[], namespace=[], max_depth=2, depth=0, cached_results=None): def is_final_result(unit): # Returns True if this result contains only the expected # units for base in unit.bases: if base not in namespace: return False return True unit = self.decompose() key = hash(unit) cached = cached_results.get(key) if cached is not None: if isinstance(cached, Exception): raise cached return cached # Prevent too many levels of recursion # And special case for dimensionless unit if depth >= max_depth: cached_results[key] = [unit] return [unit] # Make a list including all of the equivalent units units = [unit] for funit, tunit, a, b in equivalencies: if tunit is not None: if self._is_equivalent(funit): scale = funit.decompose().scale / unit.scale units.append(Unit(a(1.0 / scale) * tunit).decompose()) elif self._is_equivalent(tunit): scale = tunit.decompose().scale / unit.scale units.append(Unit(b(1.0 / scale) * funit).decompose()) else: if self._is_equivalent(funit): units.append(Unit(unit.scale)) # Store partial results partial_results = [] # Store final results that reduce to a single unit or pair of # units if len(unit.bases) == 0: final_results = [set([unit]), set()] else: final_results = [set(), set()] for tunit in namespace: tunit_decomposed = tunit.decompose() for u in units: # If the unit is a base unit, look for an exact match # to one of the bases of the target unit. If found, # factor by the same power as the target unit's base. # This allows us to factor out fractional powers # without needing to do an exhaustive search. if len(tunit_decomposed.bases) == 1: for base, power in zip(u.bases, u.powers): if tunit_decomposed._is_equivalent(base): tunit = tunit ** power tunit_decomposed = tunit_decomposed ** power break composed = (u / tunit_decomposed).decompose() factored = composed * tunit len_bases = len(composed.bases) if is_final_result(factored) and len_bases <= 1: final_results[len_bases].add(factored) else: partial_results.append( (len_bases, composed, tunit)) # Do we have any minimal results? 
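        # (final_results[0] holds factorizations with no leftover bases,
        # final_results[1] those with exactly one leftover base; if either
        # set is non-empty we return their union rather than recurse deeper.)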
for final_result in final_results: if len(final_result): results = final_results[0].union(final_results[1]) cached_results[key] = results return results partial_results.sort(key=operator.itemgetter(0)) # ...we have to recurse and try to further compose results = [] for len_bases, composed, tunit in partial_results: try: composed_list = composed._compose( equivalencies=equivalencies, namespace=namespace, max_depth=max_depth, depth=depth + 1, cached_results=cached_results) except UnitsError: composed_list = [] for subcomposed in composed_list: results.append( (len(subcomposed.bases), subcomposed, tunit)) if len(results): results.sort(key=operator.itemgetter(0)) min_length = results[0][0] subresults = set() for len_bases, composed, tunit in results: if len_bases > min_length: break else: factored = composed * tunit if is_final_result(factored): subresults.add(factored) if len(subresults): cached_results[key] = subresults return subresults if not is_final_result(self): result = UnitsError( f"Cannot represent unit {self} in terms of the given units") cached_results[key] = result raise result cached_results[key] = [self] return [self] def compose(self, equivalencies=[], units=None, max_depth=2, include_prefix_units=None): """ Return the simplest possible composite unit(s) that represent the given unit. Since there may be multiple equally simple compositions of the unit, a list of units is always returned. Parameters ---------- equivalencies : list of tuple A list of equivalence pairs to also list. See :ref:`astropy:unit_equivalencies`. This list is in addition to possible global defaults set by, e.g., `set_enabled_equivalencies`. Use `None` to turn off all equivalencies. units : set of `~astropy.units.Unit`, optional If not provided, any known units may be used to compose into. Otherwise, ``units`` is a dict, module or sequence containing the units to compose into. max_depth : int, optional The maximum recursion depth to use when composing into composite units. include_prefix_units : bool, optional When `True`, include prefixed units in the result. Default is `True` if a sequence is passed in to ``units``, `False` otherwise. Returns ------- units : list of `CompositeUnit` A list of candidate compositions. These will all be equally simple, but it may not be possible to automatically determine which of the candidates are better. """ # if units parameter is specified and is a sequence (list|tuple), # include_prefix_units is turned on by default. Ex: units=[u.kpc] if include_prefix_units is None: include_prefix_units = isinstance(units, (list, tuple)) # Pre-normalize the equivalencies list equivalencies = self._normalize_equivalencies(equivalencies) # The namespace of units to compose into should be filtered to # only include units with bases in common with self, otherwise # they can't possibly provide useful results. Having too many # destination units greatly increases the search space. 
def has_bases_in_common(a, b): if len(a.bases) == 0 and len(b.bases) == 0: return True for ab in a.bases: for bb in b.bases: if ab == bb: return True return False def has_bases_in_common_with_equiv(unit, other): if has_bases_in_common(unit, other): return True for funit, tunit, a, b in equivalencies: if tunit is not None: if unit._is_equivalent(funit): if has_bases_in_common(tunit.decompose(), other): return True elif unit._is_equivalent(tunit): if has_bases_in_common(funit.decompose(), other): return True else: if unit._is_equivalent(funit): if has_bases_in_common(dimensionless_unscaled, other): return True return False def filter_units(units): filtered_namespace = set() for tunit in units: if (isinstance(tunit, UnitBase) and (include_prefix_units or not isinstance(tunit, PrefixUnit)) and has_bases_in_common_with_equiv( decomposed, tunit.decompose())): filtered_namespace.add(tunit) return filtered_namespace decomposed = self.decompose() if units is None: units = filter_units(self._get_units_with_same_physical_type( equivalencies=equivalencies)) if len(units) == 0: units = get_current_unit_registry().non_prefix_units elif isinstance(units, dict): units = set(filter_units(units.values())) elif inspect.ismodule(units): units = filter_units(vars(units).values()) else: units = filter_units(_flatten_units_collection(units)) def sort_results(results): if not len(results): return [] # Sort the results so the simplest ones appear first. # Simplest is defined as "the minimum sum of absolute # powers" (i.e. the fewest bases), and preference should # be given to results where the sum of powers is positive # and the scale is exactly equal to 1.0 results = list(results) results.sort(key=lambda x: np.abs(x.scale)) results.sort(key=lambda x: np.sum(np.abs(x.powers))) results.sort(key=lambda x: np.sum(x.powers) < 0.0) results.sort(key=lambda x: not is_effectively_unity(x.scale)) last_result = results[0] filtered = [last_result] for result in results[1:]: if str(result) != str(last_result): filtered.append(result) last_result = result return filtered return sort_results(self._compose( equivalencies=equivalencies, namespace=units, max_depth=max_depth, depth=0, cached_results={})) def to_system(self, system): """ Converts this unit into ones belonging to the given system. Since more than one result may be possible, a list is always returned. Parameters ---------- system : module The module that defines the unit system. Commonly used ones include `astropy.units.si` and `astropy.units.cgs`. To use your own module it must contain unit objects and a sequence member named ``bases`` containing the base units of the system. Returns ------- units : list of `CompositeUnit` The list is ranked so that units containing only the base units of that system will appear first. """ bases = set(system.bases) def score(compose): # In case that compose._bases has no elements we return # 'np.inf' as 'score value'. It does not really matter which # number we would return. This case occurs for instance for # dimensionless quantities: compose_bases = compose.bases if len(compose_bases) == 0: return np.inf else: sum = 0 for base in compose_bases: if base in bases: sum += 1 return sum / float(len(compose_bases)) x = self.decompose(bases=bases) composed = x.compose(units=system) composed = sorted(composed, key=score, reverse=True) return composed @lazyproperty def si(self): """ Returns a copy of the current `Unit` instance in SI units. """ from . 
import si return self.to_system(si)[0] @lazyproperty def cgs(self): """ Returns a copy of the current `Unit` instance with CGS units. """ from . import cgs return self.to_system(cgs)[0] @property def physical_type(self): """ Physical type(s) dimensionally compatible with the unit. Returns ------- `~astropy.units.physical.PhysicalType` A representation of the physical type(s) of a unit. Examples -------- >>> from astropy import units as u >>> u.m.physical_type PhysicalType('length') >>> (u.m ** 2 / u.s).physical_type PhysicalType({'diffusivity', 'kinematic viscosity'}) Physical types can be compared to other physical types (recommended in packages) or to strings. >>> area = (u.m ** 2).physical_type >>> area == u.m.physical_type ** 2 True >>> area == "area" True `~astropy.units.physical.PhysicalType` objects can be used for dimensional analysis. >>> number_density = u.m.physical_type ** -3 >>> velocity = (u.m / u.s).physical_type >>> number_density * velocity PhysicalType('particle flux') """ from . import physical return physical.get_physical_type(self) def _get_units_with_same_physical_type(self, equivalencies=[]): """ Return a list of registered units with the same physical type as this unit. This function is used by Quantity to add its built-in conversions to equivalent units. This is a private method, since end users should be encouraged to use the more powerful `compose` and `find_equivalent_units` methods (which use this under the hood). Parameters ---------- equivalencies : list of tuple A list of equivalence pairs to also pull options from. See :ref:`astropy:unit_equivalencies`. It must already be normalized using `_normalize_equivalencies`. """ unit_registry = get_current_unit_registry() units = set(unit_registry.get_units_with_physical_type(self)) for funit, tunit, a, b in equivalencies: if tunit is not None: if self.is_equivalent(funit) and tunit not in units: units.update( unit_registry.get_units_with_physical_type(tunit)) if self._is_equivalent(tunit) and funit not in units: units.update( unit_registry.get_units_with_physical_type(funit)) else: if self.is_equivalent(funit): units.add(dimensionless_unscaled) return units class EquivalentUnitsList(list): """ A class to handle pretty-printing the result of `find_equivalent_units`. """ HEADING_NAMES = ('Primary name', 'Unit definition', 'Aliases') ROW_LEN = 3 # len(HEADING_NAMES), but hard-code since it is constant NO_EQUIV_UNITS_MSG = 'There are no equivalent units' def __repr__(self): if len(self) == 0: return self.NO_EQUIV_UNITS_MSG else: lines = self._process_equivalent_units(self) lines.insert(0, self.HEADING_NAMES) widths = [0] * self.ROW_LEN for line in lines: for i, col in enumerate(line): widths[i] = max(widths[i], len(col)) f = " {{0:<{0}s}} | {{1:<{1}s}} | {{2:<{2}s}}".format(*widths) lines = [f.format(*line) for line in lines] lines = (lines[0:1] + ['['] + [f'{x} ,' for x in lines[1:]] + [']']) return '\n'.join(lines) def _repr_html_(self): """ Outputs a HTML table representation within Jupyter notebooks. """ if len(self) == 0: return f"<p>{self.NO_EQUIV_UNITS_MSG}</p>" else: # HTML tags to use to compose the table in HTML blank_table = '<table style="width:50%">{}</table>' blank_row_container = "<tr>{}</tr>" heading_row_content = "<th>{}</th>" * self.ROW_LEN data_row_content = "<td>{}</td>" * self.ROW_LEN # The HTML will be rendered & the table is simple, so don't # bother to include newlines & indentation for the HTML code. 
heading_row = blank_row_container.format( heading_row_content.format(*self.HEADING_NAMES)) data_rows = self._process_equivalent_units(self) all_rows = heading_row for row in data_rows: html_row = blank_row_container.format( data_row_content.format(*row)) all_rows += html_row return blank_table.format(all_rows) @staticmethod def _process_equivalent_units(equiv_units_data): """ Extract attributes, and sort, the equivalent units pre-formatting. """ processed_equiv_units = [] for u in equiv_units_data: irred = u.decompose().to_string() if irred == u.name: irred = 'irreducible' processed_equiv_units.append( (u.name, irred, ', '.join(u.aliases))) processed_equiv_units.sort() return processed_equiv_units def find_equivalent_units(self, equivalencies=[], units=None, include_prefix_units=False): """ Return a list of all the units that are the same type as ``self``. Parameters ---------- equivalencies : list of tuple A list of equivalence pairs to also list. See :ref:`astropy:unit_equivalencies`. Any list given, including an empty one, supersedes global defaults that may be in effect (as set by `set_enabled_equivalencies`) units : set of `~astropy.units.Unit`, optional If not provided, all defined units will be searched for equivalencies. Otherwise, may be a dict, module or sequence containing the units to search for equivalencies. include_prefix_units : bool, optional When `True`, include prefixed units in the result. Default is `False`. Returns ------- units : list of `UnitBase` A list of unit objects that match ``u``. A subclass of `list` (``EquivalentUnitsList``) is returned that pretty-prints the list of units when output. """ results = self.compose( equivalencies=equivalencies, units=units, max_depth=1, include_prefix_units=include_prefix_units) results = set( x.bases[0] for x in results if len(x.bases) == 1) return self.EquivalentUnitsList(results) def is_unity(self): """ Returns `True` if the unit is unscaled and dimensionless. """ return False class NamedUnit(UnitBase): """ The base class of units that have a name. Parameters ---------- st : str, list of str, 2-tuple The name of the unit. If a list of strings, the first element is the canonical (short) name, and the rest of the elements are aliases. If a tuple of lists, the first element is a list of short names, and the second element is a list of long names; all but the first short name are considered "aliases". Each name *should* be a valid Python identifier to make it easy to access, but this is not required. namespace : dict, optional When provided, inject the unit, and all of its aliases, in the given namespace dictionary. If a unit by the same name is already in the namespace, a ValueError is raised. doc : str, optional A docstring describing the unit. format : dict, optional A mapping to format-specific representations of this unit. For example, for the ``Ohm`` unit, it might be nice to have it displayed as ``\\Omega`` by the ``latex`` formatter. In that case, `format` argument should be set to:: {'latex': r'\\Omega'} Raises ------ ValueError If any of the given unit names are already in the registry. ValueError If any of the given unit names are not valid Python tokens. 
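    Examples
    --------
    Named units are normally created via `def_unit`; for instance, a
    hypothetical unit with one short name and one alias, injected into a
    namespace:

    >>> from astropy import units as u
    >>> ns = {}
    >>> fn = u.def_unit(['fn', 'fortnight'], 14 * u.day, namespace=ns)
    >>> ns['fortnight'] is fn
    True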
""" def __init__(self, st, doc=None, format=None, namespace=None): UnitBase.__init__(self) if isinstance(st, (bytes, str)): self._names = [st] self._short_names = [st] self._long_names = [] elif isinstance(st, tuple): if not len(st) == 2: raise ValueError("st must be string, list or 2-tuple") self._names = st[0] + [n for n in st[1] if n not in st[0]] if not len(self._names): raise ValueError("must provide at least one name") self._short_names = st[0][:] self._long_names = st[1][:] else: if len(st) == 0: raise ValueError( "st list must have at least one entry") self._names = st[:] self._short_names = [st[0]] self._long_names = st[1:] if format is None: format = {} self._format = format if doc is None: doc = self._generate_doc() else: doc = textwrap.dedent(doc) doc = textwrap.fill(doc) self.__doc__ = doc self._inject(namespace) def _generate_doc(self): """ Generate a docstring for the unit if the user didn't supply one. This is only used from the constructor and may be overridden in subclasses. """ names = self.names if len(self.names) > 1: return "{1} ({0})".format(*names[:2]) else: return names[0] def get_format_name(self, format): """ Get a name for this unit that is specific to a particular format. Uses the dictionary passed into the `format` kwarg in the constructor. Parameters ---------- format : str The name of the format Returns ------- name : str The name of the unit for the given format. """ return self._format.get(format, self.name) @property def names(self): """ Returns all of the names associated with this unit. """ return self._names @property def name(self): """ Returns the canonical (short) name associated with this unit. """ return self._names[0] @property def aliases(self): """ Returns the alias (long) names for this unit. """ return self._names[1:] @property def short_names(self): """ Returns all of the short names associated with this unit. """ return self._short_names @property def long_names(self): """ Returns all of the long names associated with this unit. """ return self._long_names def _inject(self, namespace=None): """ Injects the unit, and all of its aliases, in the given namespace dictionary. """ if namespace is None: return # Loop through all of the names first, to ensure all of them # are new, then add them all as a single "transaction" below. for name in self._names: if name in namespace and self != namespace[name]: raise ValueError( "Object with name {!r} already exists in " "given namespace ({!r}).".format( name, namespace[name])) for name in self._names: namespace[name] = self def _recreate_irreducible_unit(cls, names, registered): """ This is used to reconstruct units when passed around by multiprocessing. """ registry = get_current_unit_registry().registry if names[0] in registry: # If in local registry return that object. return registry[names[0]] else: # otherwise, recreate the unit. unit = cls(names) if registered: # If not in local registry but registered in origin registry, # enable unit in local registry. get_current_unit_registry().add_enabled_units([unit]) return unit class IrreducibleUnit(NamedUnit): """ Irreducible units are the units that all other units are defined in terms of. Examples are meters, seconds, kilograms, amperes, etc. There is only once instance of such a unit per type. """ def __reduce__(self): # When IrreducibleUnit objects are passed to other processes # over multiprocessing, they need to be recreated to be the # ones already in the subprocesses' namespace, not new # objects, or they will be considered "unconvertible". 
# Therefore, we have a custom pickler/unpickler that # understands how to recreate the Unit on the other side. registry = get_current_unit_registry().registry return (_recreate_irreducible_unit, (self.__class__, list(self.names), self.name in registry), self.__getstate__()) @property def represents(self): """The unit that this named unit represents. For an irreducible unit, that is always itself. """ return self def decompose(self, bases=set()): if len(bases) and self not in bases: for base in bases: try: scale = self._to(base) except UnitsError: pass else: if is_effectively_unity(scale): return base else: return CompositeUnit(scale, [base], [1], _error_check=False) raise UnitConversionError( f"Unit {self} can not be decomposed into the requested bases") return self class UnrecognizedUnit(IrreducibleUnit): """ A unit that did not parse correctly. This allows for round-tripping it as a string, but no unit operations actually work on it. Parameters ---------- st : str The name of the unit. """ # For UnrecognizedUnits, we want to use "standard" Python # pickling, not the special case that is used for # IrreducibleUnits. __reduce__ = object.__reduce__ def __repr__(self): return f"UnrecognizedUnit({str(self)})" def __bytes__(self): return self.name.encode('ascii', 'replace') def __str__(self): return self.name def to_string(self, format=None): return self.name def _unrecognized_operator(self, *args, **kwargs): raise ValueError( "The unit {!r} is unrecognized, so all arithmetic operations " "with it are invalid.".format(self.name)) __pow__ = __truediv__ = __rtruediv__ = __mul__ = __rmul__ = __lt__ = \ __gt__ = __le__ = __ge__ = __neg__ = _unrecognized_operator def __eq__(self, other): try: other = Unit(other, parse_strict='silent') except (ValueError, UnitsError, TypeError): return NotImplemented return isinstance(other, type(self)) and self.name == other.name def __ne__(self, other): return not (self == other) def is_equivalent(self, other, equivalencies=None): self._normalize_equivalencies(equivalencies) return self == other def _get_converter(self, other, equivalencies=None): self._normalize_equivalencies(equivalencies) raise ValueError( "The unit {!r} is unrecognized. It can not be converted " "to other units.".format(self.name)) def get_format_name(self, format): return self.name def is_unity(self): return False class _UnitMetaClass(type): """ This metaclass exists because the Unit constructor should sometimes return instances that already exist. This "overrides" the constructor before the new instance is actually created, so we can return an existing one. """ def __call__(self, s="", represents=None, format=None, namespace=None, doc=None, parse_strict='raise'): # Short-circuit if we're already a unit if hasattr(s, '_get_physical_type_id'): return s # turn possible Quantity input for s or represents into a Unit from .quantity import Quantity if isinstance(represents, Quantity): if is_effectively_unity(represents.value): represents = represents.unit else: represents = CompositeUnit(represents.value * represents.unit.scale, bases=represents.unit.bases, powers=represents.unit.powers, _error_check=False) if isinstance(s, Quantity): if is_effectively_unity(s.value): s = s.unit else: s = CompositeUnit(s.value * s.unit.scale, bases=s.unit.bases, powers=s.unit.powers, _error_check=False) # now decide what we really need to do; define derived Unit? if isinstance(represents, UnitBase): # This has the effect of calling the real __new__ and # __init__ on the Unit class. 
return super().__call__( s, represents, format=format, namespace=namespace, doc=doc) # or interpret a Quantity (now became unit), string or number? if isinstance(s, UnitBase): return s elif isinstance(s, (bytes, str)): if len(s.strip()) == 0: # Return the NULL unit return dimensionless_unscaled if format is None: format = unit_format.Generic f = unit_format.get_format(format) if isinstance(s, bytes): s = s.decode('ascii') try: return f.parse(s) except NotImplementedError: raise except Exception as e: if parse_strict == 'silent': pass else: # Deliberately not issubclass here. Subclasses # should use their name. if f is not unit_format.Generic: format_clause = f.name + ' ' else: format_clause = '' msg = ("'{}' did not parse as {}unit: {} " "If this is meant to be a custom unit, " "define it with 'u.def_unit'. To have it " "recognized inside a file reader or other code, " "enable it with 'u.add_enabled_units'. " "For details, see " "https://docs.astropy.org/en/latest/units/combining_and_defining.html" .format(s, format_clause, str(e))) if parse_strict == 'raise': raise ValueError(msg) elif parse_strict == 'warn': warnings.warn(msg, UnitsWarning) else: raise ValueError("'parse_strict' must be 'warn', " "'raise' or 'silent'") return UnrecognizedUnit(s) elif isinstance(s, (int, float, np.floating, np.integer)): return CompositeUnit(s, [], [], _error_check=False) elif isinstance(s, tuple): from .structured import StructuredUnit return StructuredUnit(s) elif s is None: raise TypeError("None is not a valid Unit") else: raise TypeError(f"{s} can not be converted to a Unit") class Unit(NamedUnit, metaclass=_UnitMetaClass): """ The main unit class. There are a number of different ways to construct a Unit, but always returns a `UnitBase` instance. If the arguments refer to an already-existing unit, that existing unit instance is returned, rather than a new one. - From a string:: Unit(s, format=None, parse_strict='silent') Construct from a string representing a (possibly compound) unit. The optional `format` keyword argument specifies the format the string is in, by default ``"generic"``. For a description of the available formats, see `astropy.units.format`. The optional ``parse_strict`` keyword controls what happens when an unrecognized unit string is passed in. It may be one of the following: - ``'raise'``: (default) raise a ValueError exception. - ``'warn'``: emit a Warning, and return an `UnrecognizedUnit` instance. - ``'silent'``: return an `UnrecognizedUnit` instance. - From a number:: Unit(number) Creates a dimensionless unit. - From a `UnitBase` instance:: Unit(unit) Returns the given unit unchanged. - From no arguments:: Unit() Returns the dimensionless unit. - The last form, which creates a new `Unit` is described in detail below. See also: https://docs.astropy.org/en/stable/units/ Parameters ---------- st : str or list of str The name of the unit. If a list, the first element is the canonical (short) name, and the rest of the elements are aliases. represents : UnitBase instance The unit that this named unit represents. doc : str, optional A docstring describing the unit. format : dict, optional A mapping to format-specific representations of this unit. For example, for the ``Ohm`` unit, it might be nice to have it displayed as ``\\Omega`` by the ``latex`` formatter. In that case, `format` argument should be set to:: {'latex': r'\\Omega'} namespace : dict, optional When provided, inject the unit (and all of its aliases) into the given namespace. 
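    For instance, the constructor forms described above behave as:

    >>> from astropy import units as u
    >>> u.Unit("km / s") == u.km / u.s       # from a string
    True
    >>> u.Unit("") == u.dimensionless_unscaled
    True
    >>> u.Unit(u.m) is u.m                   # existing units pass through
    True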
Raises ------ ValueError If any of the given unit names are already in the registry. ValueError If any of the given unit names are not valid Python tokens. """ def __init__(self, st, represents=None, doc=None, format=None, namespace=None): represents = Unit(represents) self._represents = represents NamedUnit.__init__(self, st, namespace=namespace, doc=doc, format=format) @property def represents(self): """The unit that this named unit represents.""" return self._represents def decompose(self, bases=set()): return self._represents.decompose(bases=bases) def is_unity(self): return self._represents.is_unity() def __hash__(self): if self._hash is None: self._hash = hash((self.name, self._represents)) return self._hash @classmethod def _from_physical_type_id(cls, physical_type_id): # get string bases and powers from the ID tuple bases = [cls(base) for base, _ in physical_type_id] powers = [power for _, power in physical_type_id] if len(physical_type_id) == 1 and powers[0] == 1: unit = bases[0] else: unit = CompositeUnit(1, bases, powers, _error_check=False) return unit class PrefixUnit(Unit): """ A unit that is simply a SI-prefixed version of another unit. For example, ``mm`` is a `PrefixUnit` of ``.001 * m``. The constructor is the same as for `Unit`. """ class CompositeUnit(UnitBase): """ Create a composite unit using expressions of previously defined units. Direct use of this class is not recommended. Instead use the factory function `Unit` and arithmetic operators to compose units. Parameters ---------- scale : number A scaling factor for the unit. bases : sequence of `UnitBase` A sequence of units this unit is composed of. powers : sequence of numbers A sequence of powers (in parallel with ``bases``) for each of the base units. """ _decomposed_cache = None def __init__(self, scale, bases, powers, decompose=False, decompose_bases=set(), _error_check=True): # There are many cases internal to astropy.units where we # already know that all the bases are Unit objects, and the # powers have been validated. In those cases, we can skip the # error checking for performance reasons. When the private # kwarg `_error_check` is False, the error checking is turned # off. if _error_check: for base in bases: if not isinstance(base, UnitBase): raise TypeError( "bases must be sequence of UnitBase instances") powers = [validate_power(p) for p in powers] if not decompose and len(bases) == 1 and powers[0] >= 0: # Short-cut; with one unit there's nothing to expand and gather, # as that has happened already when creating the unit. But do only # positive powers, since for negative powers we need to re-sort. unit = bases[0] power = powers[0] if power == 1: scale *= unit.scale self._bases = unit.bases self._powers = unit.powers elif power == 0: self._bases = [] self._powers = [] else: scale *= unit.scale ** power self._bases = unit.bases self._powers = [operator.mul(*resolve_fractions(p, power)) for p in unit.powers] self._scale = sanitize_scale(scale) else: # Regular case: use inputs as preliminary scale, bases, and powers, # then "expand and gather" identical bases, sanitize the scale, &c. self._scale = scale self._bases = bases self._powers = powers self._expand_and_gather(decompose=decompose, bases=decompose_bases) def __repr__(self): if len(self._bases): return super().__repr__() else: if self._scale != 1.0: return f'Unit(dimensionless with a scale of {self._scale})' else: return 'Unit(dimensionless)' @property def scale(self): """ Return the scale of the composite unit. 
""" return self._scale @property def bases(self): """ Return the bases of the composite unit. """ return self._bases @property def powers(self): """ Return the powers of the composite unit. """ return self._powers def _expand_and_gather(self, decompose=False, bases=set()): def add_unit(unit, power, scale): if bases and unit not in bases: for base in bases: try: scale *= unit._to(base) ** power except UnitsError: pass else: unit = base break if unit in new_parts: a, b = resolve_fractions(new_parts[unit], power) new_parts[unit] = a + b else: new_parts[unit] = power return scale new_parts = {} scale = self._scale for b, p in zip(self._bases, self._powers): if decompose and b not in bases: b = b.decompose(bases=bases) if isinstance(b, CompositeUnit): scale *= b._scale ** p for b_sub, p_sub in zip(b._bases, b._powers): a, b = resolve_fractions(p_sub, p) scale = add_unit(b_sub, a * b, scale) else: scale = add_unit(b, p, scale) new_parts = [x for x in new_parts.items() if x[1] != 0] new_parts.sort(key=lambda x: (-x[1], getattr(x[0], 'name', ''))) self._bases = [x[0] for x in new_parts] self._powers = [x[1] for x in new_parts] self._scale = sanitize_scale(scale) def __copy__(self): """ For compatibility with python copy module. """ return CompositeUnit(self._scale, self._bases[:], self._powers[:]) def decompose(self, bases=set()): if len(bases) == 0 and self._decomposed_cache is not None: return self._decomposed_cache for base in self.bases: if (not isinstance(base, IrreducibleUnit) or (len(bases) and base not in bases)): break else: if len(bases) == 0: self._decomposed_cache = self return self x = CompositeUnit(self.scale, self.bases, self.powers, decompose=True, decompose_bases=bases) if len(bases) == 0: self._decomposed_cache = x return x def is_unity(self): unit = self.decompose() return len(unit.bases) == 0 and unit.scale == 1.0 si_prefixes = [ (['Y'], ['yotta'], 1e24), (['Z'], ['zetta'], 1e21), (['E'], ['exa'], 1e18), (['P'], ['peta'], 1e15), (['T'], ['tera'], 1e12), (['G'], ['giga'], 1e9), (['M'], ['mega'], 1e6), (['k'], ['kilo'], 1e3), (['h'], ['hecto'], 1e2), (['da'], ['deka', 'deca'], 1e1), (['d'], ['deci'], 1e-1), (['c'], ['centi'], 1e-2), (['m'], ['milli'], 1e-3), (['u'], ['micro'], 1e-6), (['n'], ['nano'], 1e-9), (['p'], ['pico'], 1e-12), (['f'], ['femto'], 1e-15), (['a'], ['atto'], 1e-18), (['z'], ['zepto'], 1e-21), (['y'], ['yocto'], 1e-24) ] binary_prefixes = [ (['Ki'], ['kibi'], 2. ** 10), (['Mi'], ['mebi'], 2. ** 20), (['Gi'], ['gibi'], 2. ** 30), (['Ti'], ['tebi'], 2. ** 40), (['Pi'], ['pebi'], 2. ** 50), (['Ei'], ['exbi'], 2. ** 60) ] def _add_prefixes(u, excludes=[], namespace=None, prefixes=False): """ Set up all of the standard metric prefixes for a unit. This function should not be used directly, but instead use the `prefixes` kwarg on `def_unit`. Parameters ---------- excludes : list of str, optional Any prefixes to exclude from creation to avoid namespace collisions. namespace : dict, optional When provided, inject the unit (and all of its aliases) into the given namespace dictionary. prefixes : list, optional When provided, it is a list of prefix definitions of the form: (short_names, long_tables, factor) """ if prefixes is True: prefixes = si_prefixes elif prefixes is False: prefixes = [] for short, full, factor in prefixes: names = [] format = {} for prefix in short: if prefix in excludes: continue for alias in u.short_names: names.append(prefix + alias) # This is a hack to use Greek mu as a prefix # for some formatters. 
if prefix == 'u': format['latex'] = r'\mu ' + u.get_format_name('latex') format['unicode'] = '\N{MICRO SIGN}' + u.get_format_name('unicode') for key, val in u._format.items(): format.setdefault(key, prefix + val) for prefix in full: if prefix in excludes: continue for alias in u.long_names: names.append(prefix + alias) if len(names): PrefixUnit(names, CompositeUnit(factor, [u], [1], _error_check=False), namespace=namespace, format=format) def def_unit(s, represents=None, doc=None, format=None, prefixes=False, exclude_prefixes=[], namespace=None): """ Factory function for defining new units. Parameters ---------- s : str or list of str The name of the unit. If a list, the first element is the canonical (short) name, and the rest of the elements are aliases. represents : UnitBase instance, optional The unit that this named unit represents. If not provided, a new `IrreducibleUnit` is created. doc : str, optional A docstring describing the unit. format : dict, optional A mapping to format-specific representations of this unit. For example, for the ``Ohm`` unit, it might be nice to have it displayed as ``\\Omega`` by the ``latex`` formatter. In that case, `format` argument should be set to:: {'latex': r'\\Omega'} prefixes : bool or list, optional When `True`, generate all of the SI prefixed versions of the unit as well. For example, for a given unit ``m``, will generate ``mm``, ``cm``, ``km``, etc. When a list, it is a list of prefix definitions of the form: (short_names, long_tables, factor) Default is `False`. This function always returns the base unit object, even if multiple scaled versions of the unit were created. exclude_prefixes : list of str, optional If any of the SI prefixes need to be excluded, they may be listed here. For example, ``Pa`` can be interpreted either as "petaannum" or "Pascal". Therefore, when defining the prefixes for ``a``, ``exclude_prefixes`` should be set to ``["P"]``. namespace : dict, optional When provided, inject the unit (and all of its aliases and prefixes), into the given namespace dictionary. Returns ------- unit : `~astropy.units.UnitBase` The newly-defined unit, or a matching unit that was already defined. """ if represents is not None: result = Unit(s, represents, namespace=namespace, doc=doc, format=format) else: result = IrreducibleUnit( s, namespace=namespace, doc=doc, format=format) if prefixes: _add_prefixes(result, excludes=exclude_prefixes, namespace=namespace, prefixes=prefixes) return result def _condition_arg(value): """ Validate value is acceptable for conversion purposes. Will convert into an array if not a scalar, and can be converted into an array Parameters ---------- value : int or float value, or sequence of such values Returns ------- Scalar value or numpy array Raises ------ ValueError If value is not as expected """ if isinstance(value, (np.ndarray, float, int, complex, np.void)): return value avalue = np.array(value) if avalue.dtype.kind not in ['i', 'f', 'c']: raise ValueError("Value not scalar compatible or convertible to " "an int, float, or complex array") return avalue def unit_scale_converter(val): """Function that just multiplies the value by unity. This is a separate function so it can be recognized and discarded in unit conversion. """ return 1. * _condition_arg(val) dimensionless_unscaled = CompositeUnit(1, [], [], _error_check=False) # Abbreviation of the above, see #1980 one = dimensionless_unscaled # Maintain error in old location for backward compatibility # TODO: Is this still needed? 
# Should there be a deprecation warning?
unit_format.fits.UnitScaleError = UnitScaleError
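###########################################################################
# EXAMPLE
# An editor's sketch, not part of the astropy source above: it illustrates
# the parsing behaviour of `_UnitMetaClass` and `UnrecognizedUnit` through
# the public API.  The strings 'foo' and 'bork' are made-up names.

import astropy.units as u

# A recognized string returns an existing unit instance.
assert u.Unit('km / s') == u.km / u.s

# An unrecognized string raises ValueError by default; with
# parse_strict='silent' it round-trips as an UnrecognizedUnit instead.
bogus = u.Unit('foo', parse_strict='silent')
print(repr(bogus))          # UnrecognizedUnit(foo)
print(str(bogus))           # foo

# Any arithmetic with an UnrecognizedUnit raises ValueError.
try:
    bogus * u.s
except ValueError as err:
    print(err)

# def_unit with prefixes=True also creates all SI-prefixed versions.
ns = {}
u.def_unit(['bork'], represents=3 * u.m, prefixes=True, namespace=ns)
print(ns['kbork'].to(u.m))  # 3000.0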
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ This package defines SI prefixed units that are required by the VOUnit standard but that are rarely used in practice and liable to lead to confusion (such as ``msolMass`` for milli-solar mass). They are in a separate module from `astropy.units.deprecated` because they need to be enabled by default for `astropy.units` to parse compliant VOUnit strings. As a result, e.g., ``Unit('msolMass')`` will just work, but to access the unit directly, use ``astropy.units.required_by_vounit.msolMass`` instead of the more typical idiom possible for the non-prefixed unit, ``astropy.units.solMass``. """ _ns = globals() def _initialize_module(): # Local imports to avoid polluting top-level namespace from . import astrophys, cgs from .core import _add_prefixes, def_unit _add_prefixes(astrophys.solMass, namespace=_ns, prefixes=True) _add_prefixes(astrophys.solRad, namespace=_ns, prefixes=True) _add_prefixes(astrophys.solLum, namespace=_ns, prefixes=True) _initialize_module() ########################################################################### # DOCSTRING # This generates a docstring for this module that describes all of the # standard units defined here. from .utils import generate_prefixonly_unit_summary as _generate_prefixonly_unit_summary from .utils import generate_unit_summary as _generate_unit_summary if __doc__ is not None: __doc__ += _generate_unit_summary(globals()) __doc__ += _generate_prefixonly_unit_summary(globals()) def _enable(): """ Enable the VOUnit-required extra units so they appear in results of `~astropy.units.UnitBase.find_equivalent_units` and `~astropy.units.UnitBase.compose`, and are recognized in the ``Unit('...')`` idiom. """ # Local import to avoid cyclical import # Local import to avoid polluting namespace import inspect from .core import add_enabled_units return add_enabled_units(inspect.getmodule(_enable)) # Because these are VOUnit mandated units, they start enabled (which is why the # function is hidden). _enable()
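###########################################################################
# EXAMPLE
# An editor's sketch, not part of the astropy source above: as the module
# docstring explains, the prefixed VOUnit units defined here parse with
# `Unit(...)`, while direct attribute access goes through this module.

import astropy.units as u
from astropy.units import required_by_vounit

msol = u.Unit('msolMass')
assert msol == required_by_vounit.msolMass
print(msol.to(u.solMass))   # 0.001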
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This subpackage contains classes and functions for defining and converting between different physical units. This code is adapted from the `pynbody <https://github.com/pynbody/pynbody>`_ units module written by Andrew Pontzen, who has granted the Astropy project permission to use the code under a BSD license. """ # Import order matters here -- circular dependencies abound! # Lots of things to import - go from more basic to advanced, so that # whatever advanced ones need generally has been imported already; # this also makes it easier to understand where most time is spent # (e.g., using python -X importtime). # isort: off from .core import * from .quantity import * from . import astrophys, cgs, misc, photometric, si from .function import units as function_units from .si import * from .astrophys import * from .photometric import * from .cgs import * from .physical import * from .function.units import * from .misc import * from .equivalencies import * from .function.core import * from .function.logarithmic import * from .decorators import * from .structured import * # isort: on del bases # Enable the set of default units. This notably does *not* include # Imperial units. set_enabled_units([si, cgs, astrophys, function_units, misc, photometric]) # ------------------------------------------------------------------------- def __getattr__(attr): if attr == "littleh": from astropy.units.astrophys import littleh return littleh elif attr == "with_H0": from astropy.units.equivalencies import with_H0 return with_H0 raise AttributeError(f"module {__name__!r} has no attribute {attr!r}.")
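###########################################################################
# EXAMPLE
# An editor's sketch, not part of the astropy source above: the default
# unit set enabled here notably excludes imperial units; they can be
# enabled temporarily via the context manager returned by
# `add_enabled_units`.

import astropy.units as u
from astropy.units import imperial

with u.add_enabled_units(imperial):
    # Inside the block, imperial units appear among the equivalents.
    print(u.m.find_equivalent_units())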
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ This package defines the astrophysics-specific units. They are also available in the `astropy.units` namespace. """ from astropy.constants import si as _si from . import si from .core import UnitBase, binary_prefixes, def_unit, set_enabled_units, si_prefixes # To ensure si units of the constants can be interpreted. set_enabled_units([si]) import numpy as _numpy _ns = globals() ########################################################################### # LENGTH def_unit((['AU', 'au'], ['astronomical_unit']), _si.au, namespace=_ns, prefixes=True, doc="astronomical unit: approximately the mean Earth--Sun " "distance.") def_unit(['pc', 'parsec'], _si.pc, namespace=_ns, prefixes=True, doc="parsec: approximately 3.26 light-years.") def_unit(['solRad', 'R_sun', 'Rsun'], _si.R_sun, namespace=_ns, doc="Solar radius", prefixes=False, format={'latex': r'R_{\odot}', 'unicode': 'R\N{SUN}'}) def_unit(['jupiterRad', 'R_jup', 'Rjup', 'R_jupiter', 'Rjupiter'], _si.R_jup, namespace=_ns, prefixes=False, doc="Jupiter radius", # LaTeX jupiter symbol requires wasysym format={'latex': r'R_{\rm J}', 'unicode': 'R\N{JUPITER}'}) def_unit(['earthRad', 'R_earth', 'Rearth'], _si.R_earth, namespace=_ns, prefixes=False, doc="Earth radius", # LaTeX earth symbol requires wasysym format={'latex': r'R_{\oplus}', 'unicode': 'R⊕'}) def_unit(['lyr', 'lightyear'], (_si.c * si.yr).to(si.m), namespace=_ns, prefixes=True, doc="Light year") def_unit(['lsec', 'lightsecond'], (_si.c * si.s).to(si.m), namespace=_ns, prefixes=False, doc="Light second") ########################################################################### # MASS def_unit(['solMass', 'M_sun', 'Msun'], _si.M_sun, namespace=_ns, prefixes=False, doc="Solar mass", format={'latex': r'M_{\odot}', 'unicode': 'M\N{SUN}'}) def_unit(['jupiterMass', 'M_jup', 'Mjup', 'M_jupiter', 'Mjupiter'], _si.M_jup, namespace=_ns, prefixes=False, doc="Jupiter mass", # LaTeX jupiter symbol requires wasysym format={'latex': r'M_{\rm J}', 'unicode': 'M\N{JUPITER}'}) def_unit(['earthMass', 'M_earth', 'Mearth'], _si.M_earth, namespace=_ns, prefixes=False, doc="Earth mass", # LaTeX earth symbol requires wasysym format={'latex': r'M_{\oplus}', 'unicode': 'M⊕'}) ########################################################################## # ENERGY # Here, explicitly convert the planck constant to 'eV s' since the constant # can override that to give a more precise value that takes into account # covariances between e and h. Eventually, this may also be replaced with # just `_si.Ryd.to(eV)`. 
def_unit(['Ry', 'rydberg'], (_si.Ryd * _si.c * _si.h.to(si.eV * si.s)).to(si.eV), namespace=_ns, prefixes=True, doc="Rydberg: Energy of a photon whose wavenumber is the Rydberg " "constant", format={'latex': r'R_{\infty}', 'unicode': 'R∞'}) ########################################################################### # ILLUMINATION def_unit(['solLum', 'L_sun', 'Lsun'], _si.L_sun, namespace=_ns, prefixes=False, doc="Solar luminance", format={'latex': r'L_{\odot}', 'unicode': 'L\N{SUN}'}) ########################################################################### # SPECTRAL DENSITY def_unit((['ph', 'photon'], ['photon']), format={'ogip': 'photon', 'vounit': 'photon'}, namespace=_ns, prefixes=True) def_unit(['Jy', 'Jansky', 'jansky'], 1e-26 * si.W / si.m ** 2 / si.Hz, namespace=_ns, prefixes=True, doc="Jansky: spectral flux density") def_unit(['R', 'Rayleigh', 'rayleigh'], (1e10 / (4 * _numpy.pi)) * ph * si.m ** -2 * si.s ** -1 * si.sr ** -1, namespace=_ns, prefixes=True, doc="Rayleigh: photon flux") ########################################################################### # EVENTS def_unit((['ct', 'count'], ['count']), format={'fits': 'count', 'ogip': 'count', 'vounit': 'count'}, namespace=_ns, prefixes=True, exclude_prefixes=['p']) def_unit(['adu'], namespace=_ns, prefixes=True) def_unit(['DN', 'dn'], namespace=_ns, prefixes=False) ########################################################################### # MISCELLANEOUS # Some of these are very FITS-specific and perhaps considered a mistake. # Maybe they should be moved into the FITS format class? # TODO: This is defined by the FITS standard as "relative to the sun". # Is that mass, volume, what? def_unit(['Sun'], namespace=_ns) def_unit(['chan'], namespace=_ns, prefixes=True) def_unit(['bin'], namespace=_ns, prefixes=True) def_unit(['beam'], namespace=_ns, prefixes=True) def_unit(['electron'], doc="Number of electrons", namespace=_ns, format={'latex': r'e^{-}', 'unicode': 'e⁻'}) ########################################################################### # CLEANUP del UnitBase del def_unit del si ########################################################################### # DOCSTRING # This generates a docstring for this module that describes all of the # standard units defined here. from .utils import generate_unit_summary as _generate_unit_summary if __doc__ is not None: __doc__ += _generate_unit_summary(globals()) # ------------------------------------------------------------------------- def __getattr__(attr): if attr == "littleh": import warnings from astropy.cosmology.units import littleh from astropy.utils.exceptions import AstropyDeprecationWarning warnings.warn( ("`littleh` is deprecated from module `astropy.units.astrophys` " "since astropy 5.0 and may be removed in a future version. " "Use `astropy.cosmology.units.littleh` instead."), AstropyDeprecationWarning) return littleh raise AttributeError(f"module {__name__!r} has no attribute {attr!r}.")
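###########################################################################
# EXAMPLE
# An editor's sketch, not part of the astropy source above: a few
# conversions with the astrophysical units defined in this module
# (printed values rounded).

import astropy.units as u

print((1 * u.lyr).to(u.pc))    # ~0.30660 pc
print((1 * u.R_sun).to(u.km))  # ~695700 km
print((1 * u.Ry).to(u.eV))     # ~13.60569 eV
print(u.Jy.decompose())        # 1e-26 kg / s2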
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Defines the physical types that correspond to different units.""" import numbers import warnings from astropy.utils.exceptions import AstropyDeprecationWarning from . import imperial # noqa # Needed for backward namespace compat, see #11975 and #11977 from . import astrophys, cgs, core, misc, quantity, si __all__ = ["def_physical_type", "get_physical_type", "PhysicalType"] _units_and_physical_types = [ (core.dimensionless_unscaled, "dimensionless"), (si.m, "length"), (si.m ** 2, "area"), (si.m ** 3, "volume"), (si.s, "time"), (si.rad, "angle"), (si.sr, "solid angle"), (si.m / si.s, {"speed", "velocity"}), (si.m / si.s ** 2, "acceleration"), (si.Hz, "frequency"), (si.g, "mass"), (si.mol, "amount of substance"), (si.K, "temperature"), (si.W * si.m ** -1 * si.K ** -1, "thermal conductivity"), (si.J * si.K ** -1, {"heat capacity", "entropy"}), (si.J * si.K ** -1 * si.kg ** -1, {"specific heat capacity", "specific entropy"}), (si.N, "force"), (si.J, {"energy", "work", "torque"}), (si.J * si.m ** -2 * si.s ** -1, {"energy flux", "irradiance"}), (si.Pa, {"pressure", "energy density", "stress"}), (si.W, {"power", "radiant flux"}), (si.kg * si.m ** -3, "mass density"), (si.m ** 3 / si.kg, "specific volume"), (si.mol / si.m ** 3, "molar concentration"), (si.m ** 3 / si.mol, "molar volume"), (si.kg * si.m / si.s, {"momentum", "impulse"}), (si.kg * si.m ** 2 / si.s, {"angular momentum", "action"}), (si.rad / si.s, {"angular speed", "angular velocity", "angular frequency"}), (si.rad / si.s ** 2, "angular acceleration"), (si.rad / si.m, "plate scale"), (si.g / (si.m * si.s), "dynamic viscosity"), (si.m ** 2 / si.s, {"diffusivity", "kinematic viscosity"}), (si.m ** -1, "wavenumber"), (si.m ** -2, "column density"), (si.A, "electrical current"), (si.C, "electrical charge"), (si.V, "electrical potential"), (si.Ohm, {"electrical resistance", "electrical impedance", "electrical reactance"}), (si.Ohm * si.m, "electrical resistivity"), (si.S, "electrical conductance"), (si.S / si.m, "electrical conductivity"), (si.F, "electrical capacitance"), (si.C * si.m, "electrical dipole moment"), (si.A / si.m ** 2, "electrical current density"), (si.V / si.m, "electrical field strength"), (si.C / si.m ** 2, {"electrical flux density", "surface charge density", "polarization density"}, ), (si.C / si.m ** 3, "electrical charge density"), (si.F / si.m, "permittivity"), (si.Wb, "magnetic flux"), (si.T, "magnetic flux density"), (si.A / si.m, "magnetic field strength"), (si.m ** 2 * si.A, "magnetic moment"), (si.H / si.m, {"electromagnetic field strength", "permeability"}), (si.H, "inductance"), (si.cd, "luminous intensity"), (si.lm, "luminous flux"), (si.lx, {"luminous emittance", "illuminance"}), (si.W / si.sr, "radiant intensity"), (si.cd / si.m ** 2, "luminance"), (si.m ** -3 * si.s ** -1, "volumetric rate"), (astrophys.Jy, "spectral flux density"), (si.W * si.m ** 2 * si.Hz ** -1, "surface tension"), (si.J * si.m ** -3 * si.s ** -1, {"spectral flux density wav", "power density"}), (astrophys.photon / si.Hz / si.cm ** 2 / si.s, "photon flux density"), (astrophys.photon / si.AA / si.cm ** 2 / si.s, "photon flux density wav"), (astrophys.R, "photon flux"), (misc.bit, "data quantity"), (misc.bit / si.s, "bandwidth"), (cgs.Franklin, "electrical charge (ESU)"), (cgs.statampere, "electrical current (ESU)"), (cgs.Biot, "electrical current (EMU)"), (cgs.abcoulomb, "electrical charge (EMU)"), (si.m * si.s ** -3, {"jerk", "jolt"}), (si.m * si.s ** -4, {"snap", 
"jounce"}), (si.m * si.s ** -5, "crackle"), (si.m * si.s ** -6, {"pop", "pounce"}), (si.K / si.m, "temperature gradient"), (si.J / si.kg, "specific energy"), (si.mol * si.m ** -3 * si.s ** -1, "reaction rate"), (si.kg * si.m ** 2, "moment of inertia"), (si.mol / si.s, "catalytic activity"), (si.J * si.K ** -1 * si.mol ** -1, "molar heat capacity"), (si.mol / si.kg, "molality"), (si.m * si.s, "absement"), (si.m * si.s ** 2, "absity"), (si.m ** 3 / si.s, "volumetric flow rate"), (si.s ** -2, "frequency drift"), (si.Pa ** -1, "compressibility"), (astrophys.electron * si.m ** -3, "electron density"), (astrophys.electron * si.m ** -2 * si.s ** -1, "electron flux"), (si.kg / si.m ** 2, "surface mass density"), (si.W / si.m ** 2 / si.sr, "radiance"), (si.J / si.mol, "chemical potential"), (si.kg / si.m, "linear density"), (si.H ** -1, "magnetic reluctance"), (si.W / si.K, "thermal conductance"), (si.K / si.W, "thermal resistance"), (si.K * si.m / si.W, "thermal resistivity"), (si.N / si.s, "yank"), (si.S * si.m ** 2 / si.mol, "molar conductivity"), (si.m ** 2 / si.V / si.s, "electrical mobility"), (si.lumen / si.W, "luminous efficacy"), (si.m ** 2 / si.kg, {"opacity", "mass attenuation coefficient"}), (si.kg * si.m ** -2 * si.s ** -1, {"mass flux", "momentum density"}), (si.m ** -3, "number density"), (si.m ** -2 * si.s ** -1, "particle flux"), ] _physical_unit_mapping = {} _unit_physical_mapping = {} _name_physical_mapping = {} # mapping from attribute-accessible name (no spaces, etc.) to the actual name. _attrname_physical_mapping = {} def _physical_type_from_str(name): """ Return the `PhysicalType` instance associated with the name of a physical type. """ if name == "unknown": raise ValueError("cannot uniquely identify an 'unknown' physical type.") elif name in _attrname_physical_mapping: return _attrname_physical_mapping[name] # convert attribute-accessible elif name in _name_physical_mapping: return _name_physical_mapping[name] else: raise ValueError(f"{name!r} is not a known physical type.") def _replace_temperatures_with_kelvin(unit): """ If a unit contains a temperature unit besides kelvin, then replace that unit with kelvin. Temperatures cannot be converted directly between K, °F, °C, and °Ra, in particular since there would be different conversions for T and ΔT. However, each of these temperatures each represents the physical type. Replacing the different temperature units with kelvin allows the physical type to be treated consistently. """ physical_type_id = unit._get_physical_type_id() physical_type_id_components = [] substitution_was_made = False for base, power in physical_type_id: if base in ["deg_F", "deg_C", "deg_R"]: base = "K" substitution_was_made = True physical_type_id_components.append((base, power)) if substitution_was_made: return core.Unit._from_physical_type_id(tuple(physical_type_id_components)) else: return unit def _standardize_physical_type_names(physical_type_input): """ Convert a string or `set` of strings into a `set` containing string representations of physical types. The strings provided in ``physical_type_input`` can each contain multiple physical types that are separated by a regular slash. Underscores are treated as spaces so that variable names could be identical to physical type names. 
""" if isinstance(physical_type_input, str): physical_type_input = {physical_type_input} standardized_physical_types = set() for ptype_input in physical_type_input: if not isinstance(ptype_input, str): raise ValueError(f"expecting a string, but got {ptype_input}") input_set = set(ptype_input.split("/")) processed_set = {s.strip().replace("_", " ") for s in input_set} standardized_physical_types |= processed_set return standardized_physical_types class PhysicalType: """ Represents the physical type(s) that are dimensionally compatible with a set of units. Instances of this class should be accessed through either `get_physical_type` or by using the `~astropy.units.core.UnitBase.physical_type` attribute of units. This class is not intended to be instantiated directly in user code. Parameters ---------- unit : `~astropy.units.Unit` The unit to be represented by the physical type. physical_types : `str` or `set` of `str` A `str` representing the name of the physical type of the unit, or a `set` containing strings that represent one or more names of physical types. Notes ----- A physical type will be considered equal to an equivalent `PhysicalType` instance (recommended) or a string that contains a name of the physical type. The latter method is not recommended in packages, as the names of some physical types may change in the future. To maintain backwards compatibility, two physical type names may be included in one string if they are separated with a slash (e.g., ``"momentum/impulse"``). String representations of physical types may include underscores instead of spaces. Examples -------- `PhysicalType` instances may be accessed via the `~astropy.units.core.UnitBase.physical_type` attribute of units. >>> import astropy.units as u >>> u.meter.physical_type PhysicalType('length') `PhysicalType` instances may also be accessed by calling `get_physical_type`. This function will accept a unit, a string containing the name of a physical type, or the number one. >>> u.get_physical_type(u.m ** -3) PhysicalType('number density') >>> u.get_physical_type("volume") PhysicalType('volume') >>> u.get_physical_type(1) PhysicalType('dimensionless') Some units are dimensionally compatible with multiple physical types. A pascal is intended to represent pressure and stress, but the unit decomposition is equivalent to that of energy density. >>> pressure = u.get_physical_type("pressure") >>> pressure PhysicalType({'energy density', 'pressure', 'stress'}) >>> 'energy density' in pressure True Physical types can be tested for equality against other physical type objects or against strings that may contain the name of a physical type. >>> area = (u.m ** 2).physical_type >>> area == u.barn.physical_type True >>> area == "area" True Multiplication, division, and exponentiation are enabled so that physical types may be used for dimensional analysis. >>> length = u.pc.physical_type >>> area = (u.cm ** 2).physical_type >>> length * area PhysicalType('volume') >>> area / length PhysicalType('length') >>> length ** 3 PhysicalType('volume') may also be performed using a string that contains the name of a physical type. >>> "length" * area PhysicalType('volume') >>> "area" / length PhysicalType('length') Unknown physical types are labelled as ``"unknown"``. >>> (u.s ** 13).physical_type PhysicalType('unknown') Dimensional analysis may be performed for unknown physical types too. 
>>> length_to_19th_power = (u.m ** 19).physical_type >>> length_to_20th_power = (u.m ** 20).physical_type >>> length_to_20th_power / length_to_19th_power PhysicalType('length') """ def __init__(self, unit, physical_types): self._unit = _replace_temperatures_with_kelvin(unit) self._physical_type_id = self._unit._get_physical_type_id() self._physical_type = _standardize_physical_type_names(physical_types) self._physical_type_list = sorted(self._physical_type) def __iter__(self): yield from self._physical_type_list def __getattr__(self, attr): # TODO: remove this whole method when accessing str attributes from # physical types is no longer supported # short circuit attribute accessed in __str__ to prevent recursion if attr == '_physical_type_list': super().__getattribute__(attr) self_str_attr = getattr(str(self), attr, None) if hasattr(str(self), attr): warning_message = ( f"support for accessing str attributes such as {attr!r} " "from PhysicalType instances is deprecated since 4.3 " "and will be removed in a subsequent release.") warnings.warn(warning_message, AstropyDeprecationWarning) return self_str_attr else: super().__getattribute__(attr) # to get standard error message def __eq__(self, other): """ Return `True` if ``other`` represents a physical type that is consistent with the physical type of the `PhysicalType` instance. """ if isinstance(other, PhysicalType): return self._physical_type_id == other._physical_type_id elif isinstance(other, str): other = _standardize_physical_type_names(other) return other.issubset(self._physical_type) else: return NotImplemented def __ne__(self, other): equality = self.__eq__(other) return not equality if isinstance(equality, bool) else NotImplemented def _name_string_as_ordered_set(self): return "{" + str(self._physical_type_list)[1:-1] + "}" def __repr__(self): if len(self._physical_type) == 1: names = "'" + self._physical_type_list[0] + "'" else: names = self._name_string_as_ordered_set() return f"PhysicalType({names})" def __str__(self): return "/".join(self._physical_type_list) @staticmethod def _dimensionally_compatible_unit(obj): """ Return a unit that corresponds to the provided argument. If a unit is passed in, return that unit. If a physical type (or a `str` with the name of a physical type) is passed in, return a unit that corresponds to that physical type. If the number equal to ``1`` is passed in, return a dimensionless unit. Otherwise, return `NotImplemented`. 
""" if isinstance(obj, core.UnitBase): return _replace_temperatures_with_kelvin(obj) elif isinstance(obj, PhysicalType): return obj._unit elif isinstance(obj, numbers.Real) and obj == 1: return core.dimensionless_unscaled elif isinstance(obj, str): return _physical_type_from_str(obj)._unit else: return NotImplemented def _dimensional_analysis(self, other, operation): other_unit = self._dimensionally_compatible_unit(other) if other_unit is NotImplemented: return NotImplemented other_unit = _replace_temperatures_with_kelvin(other_unit) new_unit = getattr(self._unit, operation)(other_unit) return new_unit.physical_type def __mul__(self, other): return self._dimensional_analysis(other, "__mul__") def __rmul__(self, other): return self.__mul__(other) def __truediv__(self, other): return self._dimensional_analysis(other, "__truediv__") def __rtruediv__(self, other): other = self._dimensionally_compatible_unit(other) if other is NotImplemented: return NotImplemented return other.physical_type._dimensional_analysis(self, "__truediv__") def __pow__(self, power): return (self._unit ** power).physical_type def __hash__(self): return hash(self._physical_type_id) def __len__(self): return len(self._physical_type) # We need to prevent operations like where a Unit instance left # multiplies a PhysicalType instance from returning a `Quantity` # instance with a PhysicalType as the value. We can do this by # preventing np.array from casting a PhysicalType instance as # an object array. __array__ = None def def_physical_type(unit, name): """ Add a mapping between a unit and the corresponding physical type(s). If a physical type already exists for a unit, add new physical type names so long as those names are not already in use for other physical types. Parameters ---------- unit : `~astropy.units.Unit` The unit to be represented by the physical type. name : `str` or `set` of `str` A `str` representing the name of the physical type of the unit, or a `set` containing strings that represent one or more names of physical types. Raises ------ ValueError If a physical type name is already in use for another unit, or if attempting to name a unit as ``"unknown"``. """ physical_type_id = unit._get_physical_type_id() physical_type_names = _standardize_physical_type_names(name) if "unknown" in physical_type_names: raise ValueError("cannot uniquely define an unknown physical type") names_for_other_units = set(_unit_physical_mapping.keys()).difference( _physical_unit_mapping.get(physical_type_id, {})) names_already_in_use = physical_type_names & names_for_other_units if names_already_in_use: raise ValueError( f"the following physical type names are already in use: " f"{names_already_in_use}.") unit_already_in_use = physical_type_id in _physical_unit_mapping if unit_already_in_use: physical_type = _physical_unit_mapping[physical_type_id] physical_type_names |= set(physical_type) physical_type.__init__(unit, physical_type_names) else: physical_type = PhysicalType(unit, physical_type_names) _physical_unit_mapping[physical_type_id] = physical_type for ptype in physical_type: _unit_physical_mapping[ptype] = physical_type_id for ptype_name in physical_type_names: _name_physical_mapping[ptype_name] = physical_type # attribute-accessible name attr_name = ptype_name.replace(' ', '_').replace('(', '').replace(')', '') _attrname_physical_mapping[attr_name] = physical_type def get_physical_type(obj): """ Return the physical type that corresponds to a unit (or another physical type representation). 
Parameters ---------- obj : quantity-like or `~astropy.units.PhysicalType`-like An object that (implicitly or explicitly) has a corresponding physical type. This object may be a unit, a `~astropy.units.Quantity`, an object that can be converted to a `~astropy.units.Quantity` (such as a number or array), a string that contains a name of a physical type, or a `~astropy.units.PhysicalType` instance. Returns ------- `~astropy.units.PhysicalType` A representation of the physical type(s) of the unit. Examples -------- The physical type may be retrieved from a unit or a `~astropy.units.Quantity`. >>> import astropy.units as u >>> u.get_physical_type(u.meter ** -2) PhysicalType('column density') >>> u.get_physical_type(0.62 * u.barn * u.Mpc) PhysicalType('volume') The physical type may also be retrieved by providing a `str` that contains the name of a physical type. >>> u.get_physical_type("energy") PhysicalType({'energy', 'torque', 'work'}) Numbers and arrays of numbers correspond to a dimensionless physical type. >>> u.get_physical_type(1) PhysicalType('dimensionless') """ if isinstance(obj, PhysicalType): return obj if isinstance(obj, str): return _physical_type_from_str(obj) try: unit = obj if isinstance(obj, core.UnitBase) else quantity.Quantity(obj, copy=False).unit except TypeError as exc: raise TypeError(f"{obj} does not correspond to a physical type.") from exc unit = _replace_temperatures_with_kelvin(unit) physical_type_id = unit._get_physical_type_id() unit_has_known_physical_type = physical_type_id in _physical_unit_mapping if unit_has_known_physical_type: return _physical_unit_mapping[physical_type_id] else: return PhysicalType(unit, "unknown") # ------------------------------------------------------------------------------ # Script section creating the physical types and the documentation # define the physical types for unit, physical_type in _units_and_physical_types: def_physical_type(unit, physical_type) # For getting the physical types. def __getattr__(name): """Checks for physical types using lazy import. This also allows user-defined physical types to be accessible from the :mod:`astropy.units.physical` module. See `PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_ Parameters ---------- name : str The name of the attribute in this module. If it is already defined, then this function is not called. Returns ------- ptype : `~astropy.units.physical.PhysicalType` Raises ------ AttributeError If the ``name`` does not correspond to a physical type """ if name in _attrname_physical_mapping: return _attrname_physical_mapping[name] raise AttributeError(f"module {__name__!r} has no attribute {name!r}") def __dir__(): """Return contents directory (__all__ + all physical type names).""" return list(set(__all__) | set(_attrname_physical_mapping.keys())) # This generates a docstring addition for this module that describes all of the # standard physical types defined here. if __doc__ is not None: doclines = [ ".. list-table:: Defined Physical Types", " :header-rows: 1", " :widths: 30 10 50", "", " * - Physical type", " - Unit", " - Other physical type(s) with same unit"] for name in sorted(_name_physical_mapping.keys()): physical_type = _name_physical_mapping[name] doclines.extend([ f" * - _`{name}`", f" - :math:`{physical_type._unit.to_string('latex')[1:-1]}`", f" - {', '.join([n for n in physical_type if n != name])}"]) __doc__ += '\n\n' + '\n'.join(doclines) del unit, physical_type
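###########################################################################
# EXAMPLE
# An editor's sketch, not part of the astropy source above: looking up and
# combining the physical types registered in this module.

import astropy.units as u
from astropy.units import physical

pressure = u.get_physical_type('pressure')
print(pressure)                                       # energy density/pressure/stress
print((u.erg / u.cm ** 3).physical_type == pressure)  # True: same decomposition

# PhysicalType instances support dimensional analysis directly ...
print(u.get_physical_type('speed') * u.get_physical_type('time'))  # length

# ... and are reachable as module attributes via the __getattr__ above.
print(physical.speed)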
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Miscellaneous utilities for `astropy.units`. None of the functions in the module are meant for use outside of the package. """ import io import re from fractions import Fraction import numpy as np from numpy import finfo _float_finfo = finfo(float) # take float here to ensure comparison with another float is fast # give a little margin since often multiple calculations happened _JUST_BELOW_UNITY = float(1.-4.*_float_finfo.epsneg) _JUST_ABOVE_UNITY = float(1.+4.*_float_finfo.eps) def _get_first_sentence(s): """ Get the first sentence from a string and remove any carriage returns. """ x = re.match(r".*?\S\.\s", s) if x is not None: s = x.group(0) return s.replace('\n', ' ') def _iter_unit_summary(namespace): """ Generates the ``(unit, doc, represents, aliases, prefixes)`` tuple used to format the unit summary docs in `generate_unit_summary`. """ from . import core # Get all of the units, and keep track of which ones have SI # prefixes units = [] has_prefixes = set() for key, val in namespace.items(): # Skip non-unit items if not isinstance(val, core.UnitBase): continue # Skip aliases if key != val.name: continue if isinstance(val, core.PrefixUnit): # This will return the root unit that is scaled by the prefix # attached to it has_prefixes.add(val._represents.bases[0].name) else: units.append(val) # Sort alphabetically, case insensitive units.sort(key=lambda x: x.name.lower()) for unit in units: doc = _get_first_sentence(unit.__doc__).strip() represents = '' if isinstance(unit, core.Unit): represents = f":math:`{unit._represents.to_string('latex')[1:-1]}`" aliases = ', '.join(f'``{x}``' for x in unit.aliases) yield (unit, doc, represents, aliases, 'Yes' if unit.name in has_prefixes else 'No') def generate_unit_summary(namespace): """ Generates a summary of units from a given namespace. This is used to generate the docstring for the modules that define the actual units. Parameters ---------- namespace : dict A namespace containing units. Returns ------- docstring : str A docstring containing a summary table of the units. """ docstring = io.StringIO() docstring.write(""" .. list-table:: Available Units :header-rows: 1 :widths: 10 20 20 20 1 * - Unit - Description - Represents - Aliases - SI Prefixes """) for unit_summary in _iter_unit_summary(namespace): docstring.write(""" * - ``{}`` - {} - {} - {} - {} """.format(*unit_summary)) return docstring.getvalue() def generate_prefixonly_unit_summary(namespace): """ Generates table entries for units in a namespace that are just prefixes without the base unit. Note that this is intended to be used *after* `generate_unit_summary` and therefore does not include the table header. Parameters ---------- namespace : dict A namespace containing units that are prefixes but do *not* have the base unit in their namespace. Returns ------- docstring : str A docstring containing a summary table of the units. """ from . import PrefixUnit faux_namespace = {} for nm, unit in namespace.items(): if isinstance(unit, PrefixUnit): base_unit = unit.represents.bases[0] faux_namespace[base_unit.name] = base_unit docstring = io.StringIO() for unit_summary in _iter_unit_summary(faux_namespace): docstring.write(""" * - Prefixes for ``{}`` - {} prefixes - {} - {} - Only """.format(*unit_summary)) return docstring.getvalue() def is_effectively_unity(value): # value is *almost* always real, except, e.g., for u.mag**0.5, when # it will be complex. 
    # Use try/except to ensure normal case is fast.
    try:
        return _JUST_BELOW_UNITY <= value <= _JUST_ABOVE_UNITY
    except TypeError:  # value is complex
        return (_JUST_BELOW_UNITY <= value.real <= _JUST_ABOVE_UNITY and
                _JUST_BELOW_UNITY <= value.imag + 1 <= _JUST_ABOVE_UNITY)


def sanitize_scale(scale):
    if is_effectively_unity(scale):
        return 1.0

    # Maximum speed for regular case where scale is a float.
    if scale.__class__ is float:
        return scale

    # We cannot have numpy scalars, since they don't autoconvert to
    # complex if necessary.  They are also slower.
    if hasattr(scale, 'dtype'):
        scale = scale.item()

    # All classes that scale can be (int, float, complex, Fraction)
    # have an "imag" attribute.
    if scale.imag:
        if abs(scale.real) > abs(scale.imag):
            if is_effectively_unity(scale.imag / scale.real + 1):
                return scale.real

        elif is_effectively_unity(scale.real / scale.imag + 1):
            return complex(0., scale.imag)

        return scale

    else:
        return scale.real


def maybe_simple_fraction(p, max_denominator=100):
    """Fraction very close to p with denominator at most max_denominator.

    The fraction has to be such that fraction/p is unity to within 4 ulp.
    If such a fraction does not exist, returns the float number.

    The algorithm is that of `fractions.Fraction.limit_denominator`, but
    sped up by not creating a fraction to start with.
    """
    if p == 0 or p.__class__ is int:
        return p
    n, d = p.as_integer_ratio()
    a = n // d
    # Normally, start with 0,1 and 1,0; here we have applied first iteration.
    n0, d0 = 1, 0
    n1, d1 = a, 1
    while d1 <= max_denominator:
        if _JUST_BELOW_UNITY <= n1 / (d1 * p) <= _JUST_ABOVE_UNITY:
            return Fraction(n1, d1)
        n, d = d, n - a * d
        a = n // d
        n0, n1 = n1, n0 + a * n1
        d0, d1 = d1, d0 + a * d1
    return p


def validate_power(p):
    """Convert a power to a floating point value, an integer, or a Fraction.

    If a fractional power can be represented exactly as a floating point
    number, convert it to a float, to make the math much faster; otherwise,
    retain it as a `fractions.Fraction` object to avoid losing precision.
    Conversely, if the value is indistinguishable from a rational number
    with a low-numbered denominator, convert to a Fraction object.

    Parameters
    ----------
    p : float, int, Rational, Fraction
        Power to be converted
    """
    denom = getattr(p, 'denominator', None)
    if denom is None:
        try:
            p = float(p)
        except Exception:
            if not np.isscalar(p):
                raise ValueError("Quantities and Units may only be raised "
                                 "to a scalar power")
            else:
                raise

        # This returns either a (simple) Fraction or the same float.
        p = maybe_simple_fraction(p)
        # If still a float, nothing more to be done.
        if isinstance(p, float):
            return p

        # Otherwise, check for simplifications.
        denom = p.denominator

    if denom == 1:
        p = p.numerator

    elif (denom & (denom - 1)) == 0:
        # Above is a bit-twiddling hack to see if denom is a power of two.
        # If so, float does not lose precision and will speed things up.
        p = float(p)

    return p


def resolve_fractions(a, b):
    """
    If either input is a Fraction, convert the other to a Fraction
    (at least if it does not have a ridiculous denominator).
    This ensures that any operation involving a Fraction will use
    rational arithmetic and preserve precision.
    """
    # We short-circuit on the most common cases of int and float, since
    # isinstance(a, Fraction) is very slow for any non-Fraction instances.
a_is_fraction = (a.__class__ is not int and a.__class__ is not float and isinstance(a, Fraction)) b_is_fraction = (b.__class__ is not int and b.__class__ is not float and isinstance(b, Fraction)) if a_is_fraction and not b_is_fraction: b = maybe_simple_fraction(b) elif not a_is_fraction and b_is_fraction: a = maybe_simple_fraction(a) return a, b def quantity_asanyarray(a, dtype=None): from .quantity import Quantity if not isinstance(a, np.ndarray) and not np.isscalar(a) and any(isinstance(x, Quantity) for x in a): return Quantity(a, dtype=dtype) else: return np.asanyarray(a, dtype=dtype)
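###########################################################################
# EXAMPLE
# An editor's sketch, not part of the astropy source above: the effect of
# `validate_power` and `resolve_fractions`, seen through the public unit
# arithmetic they support.

from fractions import Fraction

import astropy.units as u

# A power-of-two denominator is exact in floating point, so the power is
# stored as a float; other denominators stay Fractions.
print((u.m ** Fraction(1, 2)).powers)  # [0.5]
print((u.m ** Fraction(1, 3)).powers)  # [Fraction(1, 3)]

# Rational arithmetic is preserved when fractional powers combine.
print((u.m ** Fraction(1, 3)) ** 3)    # m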
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ This package defines the CGS units. They are also available in the top-level `astropy.units` namespace. """ from fractions import Fraction from . import si from .core import UnitBase, def_unit _ns = globals() def_unit(['cm', 'centimeter'], si.cm, namespace=_ns, prefixes=False) g = si.g s = si.s C = si.C rad = si.rad sr = si.sr cd = si.cd K = si.K deg_C = si.deg_C mol = si.mol ########################################################################## # ACCELERATION def_unit(['Gal', 'gal'], cm / s ** 2, namespace=_ns, prefixes=True, doc="Gal: CGS unit of acceleration") ########################################################################## # ENERGY # Use CGS definition of erg def_unit(['erg'], g * cm ** 2 / s ** 2, namespace=_ns, prefixes=True, doc="erg: CGS unit of energy") ########################################################################## # FORCE def_unit(['dyn', 'dyne'], g * cm / s ** 2, namespace=_ns, prefixes=True, doc="dyne: CGS unit of force") ########################################################################## # PRESSURE def_unit(['Ba', 'Barye', 'barye'], g / (cm * s ** 2), namespace=_ns, prefixes=True, doc="Barye: CGS unit of pressure") ########################################################################## # DYNAMIC VISCOSITY def_unit(['P', 'poise'], g / (cm * s), namespace=_ns, prefixes=True, doc="poise: CGS unit of dynamic viscosity") ########################################################################## # KINEMATIC VISCOSITY def_unit(['St', 'stokes'], cm ** 2 / s, namespace=_ns, prefixes=True, doc="stokes: CGS unit of kinematic viscosity") ########################################################################## # WAVENUMBER def_unit(['k', 'Kayser', 'kayser'], cm ** -1, namespace=_ns, prefixes=True, doc="kayser: CGS unit of wavenumber") ########################################################################### # ELECTRICAL def_unit(['D', 'Debye', 'debye'], Fraction(1, 3) * 1e-29 * C * si.m, namespace=_ns, prefixes=True, doc="Debye: CGS unit of electric dipole moment") def_unit(['Fr', 'Franklin', 'statcoulomb', 'statC', 'esu'], g ** Fraction(1, 2) * cm ** Fraction(3, 2) * s ** -1, namespace=_ns, doc='Franklin: CGS (ESU) unit of charge') def_unit(['statA', 'statampere'], Fr * s ** -1, namespace=_ns, doc='statampere: CGS (ESU) unit of current') def_unit(['Bi', 'Biot', 'abA', 'abampere'], g ** Fraction(1, 2) * cm ** Fraction(1, 2) * s ** -1, namespace=_ns, doc='Biot: CGS (EMU) unit of current') def_unit(['abC', 'abcoulomb'], Bi * s, namespace=_ns, doc='abcoulomb: CGS (EMU) of charge') ########################################################################### # MAGNETIC def_unit(['G', 'Gauss', 'gauss'], 1e-4 * si.T, namespace=_ns, prefixes=True, doc="Gauss: CGS unit for magnetic field") def_unit(['Mx', 'Maxwell', 'maxwell'], 1e-8 * si.Wb, namespace=_ns, doc="Maxwell: CGS unit for magnetic flux") ########################################################################### # BASES bases = set([cm, g, s, rad, cd, K, mol]) ########################################################################### # CLEANUP del UnitBase del def_unit del si del Fraction ########################################################################### # DOCSTRING # This generates a docstring for this module that describes all of the # standard units defined here. from .utils import generate_unit_summary as _generate_unit_summary if __doc__ is not None: __doc__ += _generate_unit_summary(globals())
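###########################################################################
# EXAMPLE
# An editor's sketch, not part of the astropy source above: the CGS units
# defined here convert freely to SI where the dimensions match, while the
# ESU/EMU electrical units deliberately do not.

import astropy.units as u

print(u.dyn.to(u.N))   # 1e-05
print(u.erg.to(u.J))   # 1e-07
print(u.G.to(u.T))     # 0.0001

# The Franklin carries fractional base-unit powers, so there is no direct
# conversion to coulombs.
print(u.Fr.decompose())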
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ This package defines deprecated units. These units are not available in the top-level `astropy.units` namespace. To use these units, you must import the `astropy.units.deprecated` module:: >>> from astropy.units import deprecated >>> q = 10. * deprecated.emu # doctest: +SKIP To include them in `~astropy.units.UnitBase.compose` and the results of `~astropy.units.UnitBase.find_equivalent_units`, do:: >>> from astropy.units import deprecated >>> deprecated.enable() # doctest: +SKIP """ _ns = globals() def _initialize_module(): # Local imports to avoid polluting top-level namespace from . import astrophys, cgs from .core import _add_prefixes, def_unit def_unit(['emu'], cgs.Bi, namespace=_ns, doc='Biot: CGS (EMU) unit of current') # Add only some *prefixes* as deprecated units. _add_prefixes(astrophys.jupiterMass, namespace=_ns, prefixes=True) _add_prefixes(astrophys.earthMass, namespace=_ns, prefixes=True) _add_prefixes(astrophys.jupiterRad, namespace=_ns, prefixes=True) _add_prefixes(astrophys.earthRad, namespace=_ns, prefixes=True) _initialize_module() ########################################################################### # DOCSTRING # This generates a docstring for this module that describes all of the # standard units defined here. from .utils import generate_prefixonly_unit_summary as _generate_prefixonly_unit_summary from .utils import generate_unit_summary as _generate_unit_summary if __doc__ is not None: __doc__ += _generate_unit_summary(globals()) __doc__ += _generate_prefixonly_unit_summary(globals()) def enable(): """ Enable deprecated units so they appear in results of `~astropy.units.UnitBase.find_equivalent_units` and `~astropy.units.UnitBase.compose`. This may be used with the ``with`` statement to enable deprecated units only temporarily. """ import inspect # Local import to avoid cyclical import # Local import to avoid polluting namespace from .core import add_enabled_units return add_enabled_units(inspect.getmodule(enable))
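###########################################################################
# EXAMPLE
# An editor's sketch, not part of the astropy source above: deprecated
# units resolve by attribute access at any time, but participate in
# compose() and find_equivalent_units() only while enabled.

import astropy.units as u
from astropy.units import deprecated

q = 10. * deprecated.emu      # attribute access always works

with deprecated.enable():     # enable() can be used as a context manager
    print(u.Bi.compose())     # 'emu' should now appear among the results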
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module defines structured units and quantities. """ # Standard library import operator import numpy as np from .core import UNITY, Unit, UnitBase __all__ = ['StructuredUnit'] DTYPE_OBJECT = np.dtype('O') def _names_from_dtype(dtype): """Recursively extract field names from a dtype.""" names = [] for name in dtype.names: subdtype = dtype.fields[name][0] if subdtype.names: names.append([name, _names_from_dtype(subdtype)]) else: names.append(name) return tuple(names) def _normalize_names(names): """Recursively normalize, inferring upper level names for unadorned tuples. Generally, we want the field names to be organized like dtypes, as in ``(['pv', ('p', 'v')], 't')``. But we automatically infer upper field names if the list is absent from items like ``(('p', 'v'), 't')``, by concatenating the names inside the tuple. """ result = [] for name in names: if isinstance(name, str) and len(name) > 0: result.append(name) elif (isinstance(name, list) and len(name) == 2 and isinstance(name[0], str) and len(name[0]) > 0 and isinstance(name[1], tuple) and len(name[1]) > 0): result.append([name[0], _normalize_names(name[1])]) elif isinstance(name, tuple) and len(name) > 0: new_tuple = _normalize_names(name) result.append([''.join([(i[0] if isinstance(i, list) else i) for i in new_tuple]), new_tuple]) else: raise ValueError(f'invalid entry {name!r}. Should be a name, ' 'tuple of names, or 2-element list of the ' 'form [name, tuple of names].') return tuple(result) class StructuredUnit: """Container for units for a structured Quantity. Parameters ---------- units : unit-like, tuple of unit-like, or `~astropy.units.StructuredUnit` Tuples can be nested. If a `~astropy.units.StructuredUnit` is passed in, it will be returned unchanged unless different names are requested. names : tuple of str, tuple or list; `~numpy.dtype`; or `~astropy.units.StructuredUnit`, optional Field names for the units, possibly nested. Can be inferred from a structured `~numpy.dtype` or another `~astropy.units.StructuredUnit`. For nested tuples, by default the name of the upper entry will be the concatenation of the names of the lower levels. One can pass in a list with the upper-level name and a tuple of lower-level names to avoid this. For tuples, not all levels have to be given; for any level not passed in, default field names of 'f0', 'f1', etc., will be used. Notes ----- It is recommended to initialze the class indirectly, using `~astropy.units.Unit`. E.g., ``u.Unit('AU,AU/day')``. When combined with a structured array to produce a structured `~astropy.units.Quantity`, array field names will take precedence. Generally, passing in ``names`` is needed only if the unit is used unattached to a `~astropy.units.Quantity` and one needs to access its fields. 
Examples -------- Various ways to initialize a `~astropy.units.StructuredUnit`:: >>> import astropy.units as u >>> su = u.Unit('(AU,AU/day),yr') >>> su Unit("((AU, AU / d), yr)") >>> su.field_names (['f0', ('f0', 'f1')], 'f1') >>> su['f1'] Unit("yr") >>> su2 = u.StructuredUnit(((u.AU, u.AU/u.day), u.yr), names=(('p', 'v'), 't')) >>> su2 == su True >>> su2.field_names (['pv', ('p', 'v')], 't') >>> su3 = u.StructuredUnit((su2['pv'], u.day), names=(['p_v', ('p', 'v')], 't')) >>> su3.field_names (['p_v', ('p', 'v')], 't') >>> su3.keys() ('p_v', 't') >>> su3.values() (Unit("(AU, AU / d)"), Unit("d")) Structured units share most methods with regular units:: >>> su.physical_type ((PhysicalType('length'), PhysicalType({'speed', 'velocity'})), PhysicalType('time')) >>> su.si Unit("((1.49598e+11 m, 1.73146e+06 m / s), 3.15576e+07 s)") """ def __new__(cls, units, names=None): dtype = None if names is not None: if isinstance(names, StructuredUnit): dtype = names._units.dtype names = names.field_names elif isinstance(names, np.dtype): if not names.fields: raise ValueError('dtype should be structured, with fields.') dtype = np.dtype([(name, DTYPE_OBJECT) for name in names.names]) names = _names_from_dtype(names) else: if not isinstance(names, tuple): names = (names,) names = _normalize_names(names) if not isinstance(units, tuple): units = Unit(units) if isinstance(units, StructuredUnit): # Avoid constructing a new StructuredUnit if no field names # are given, or if all field names are the same already anyway. if names is None or units.field_names == names: return units # Otherwise, turn (the upper level) into a tuple, for renaming. units = units.values() else: # Single regular unit: make a tuple for iteration below. units = (units,) if names is None: names = tuple(f'f{i}' for i in range(len(units))) elif len(units) != len(names): raise ValueError("lengths of units and field names must match.") converted = [] for unit, name in zip(units, names): if isinstance(name, list): # For list, the first item is the name of our level, # and the second another tuple of names, i.e., we recurse. unit = cls(unit, name[1]) name = name[0] else: # We are at the lowest level. Check unit. unit = Unit(unit) if dtype is not None and isinstance(unit, StructuredUnit): raise ValueError("units do not match in depth with field " "names from dtype or structured unit.") converted.append(unit) self = super().__new__(cls) if dtype is None: dtype = np.dtype([((name[0] if isinstance(name, list) else name), DTYPE_OBJECT) for name in names]) # Decay array to void so we can access by field name and number. self._units = np.array(tuple(converted), dtype)[()] return self def __getnewargs__(self): """When de-serializing, e.g. pickle, start with a blank structure.""" return (), None @property def field_names(self): """Possibly nested tuple of the field names of the parts.""" return tuple(([name, unit.field_names] if isinstance(unit, StructuredUnit) else name) for name, unit in self.items()) # Allow StructuredUnit to be treated as an (ordered) mapping. def __len__(self): return len(self._units.dtype.names) def __getitem__(self, item): # Since we are based on np.void, indexing by field number works too. return self._units[item] def values(self): return self._units.item() def keys(self): return self._units.dtype.names def items(self): return tuple(zip(self._units.dtype.names, self._units.item())) def __iter__(self): yield from self._units.dtype.names # Helpers for methods below. 
    def _recursively_apply(self, func, cls=None):
        """Apply func recursively.

        Parameters
        ----------
        func : callable
            Function to apply to all parts of the structured unit,
            recursing as needed.
        cls : type, optional
            If given, should be a subclass of `~numpy.void`. By default,
            will return a new `~astropy.units.StructuredUnit` instance.
        """
        results = np.array(tuple([func(part) for part in self.values()]),
                           self._units.dtype)[()]
        if cls is not None:
            return results.view((cls, results.dtype))

        # Short-cut; no need to interpret field names, etc.
        result = super().__new__(self.__class__)
        result._units = results
        return result

    def _recursively_get_dtype(self, value, enter_lists=True):
        """Get structured dtype according to value, using our field names.

        This is useful since ``np.array(value)`` would treat tuples as lower
        levels of the array, rather than as elements of a structured array.
        The routine does presume that the type of the first tuple is
        representative of the rest.  Used in ``_get_converter``.

        For the special value of ``UNITY``, all fields are assumed to be 1.0,
        and hence this will return an all-float dtype.

        """
        if enter_lists:
            while isinstance(value, list):
                value = value[0]
        if value is UNITY:
            value = (UNITY,) * len(self)
        elif not isinstance(value, tuple) or len(self) != len(value):
            raise ValueError(f"cannot interpret value {value} for unit {self}.")
        descr = []
        for (name, unit), part in zip(self.items(), value):
            if isinstance(unit, StructuredUnit):
                descr.append(
                    (name, unit._recursively_get_dtype(part,
                                                       enter_lists=False)))
            else:
                # Got a part associated with a regular unit. Get its dtype.
                # Like for Quantity, we cast integers to float.
                part = np.array(part)
                part_dtype = part.dtype
                if part_dtype.kind in 'iu':
                    part_dtype = np.dtype(float)
                descr.append((name, part_dtype, part.shape))
        return np.dtype(descr)

    @property
    def si(self):
        """The `StructuredUnit` instance in SI units."""
        return self._recursively_apply(operator.attrgetter('si'))

    @property
    def cgs(self):
        """The `StructuredUnit` instance in cgs units."""
        return self._recursively_apply(operator.attrgetter('cgs'))

    # Needed to pass through Unit initializer, so might as well use it.
    def _get_physical_type_id(self):
        return self._recursively_apply(
            operator.methodcaller('_get_physical_type_id'), cls=Structure)

    @property
    def physical_type(self):
        """Physical types of all the fields."""
        return self._recursively_apply(
            operator.attrgetter('physical_type'), cls=Structure)

    def decompose(self, bases=set()):
        """The `StructuredUnit` composed of only irreducible units.

        Parameters
        ----------
        bases : sequence of `~astropy.units.UnitBase`, optional
            The bases to decompose into.  When not provided,
            decomposes down to any irreducible units.  When provided,
            the decomposed result will only contain the given units.
            This will raise a `UnitsError` if it's not possible
            to do so.

        Returns
        -------
        `~astropy.units.StructuredUnit`
            With the unit for each field containing only irreducible units.
        """
        return self._recursively_apply(
            operator.methodcaller('decompose', bases=bases))

    def is_equivalent(self, other, equivalencies=[]):
        """`True` if all fields are equivalent to the other's fields.

        Parameters
        ----------
        other : `~astropy.units.StructuredUnit`
            The structured unit to compare with, or what can initialize one.
        equivalencies : list of tuple, optional
            A list of equivalence pairs to try if the units are not
            directly convertible.  See :ref:`unit_equivalencies`.
            The list will be applied to all fields.
Returns ------- bool """ try: other = StructuredUnit(other) except Exception: return False if len(self) != len(other): return False for self_part, other_part in zip(self.values(), other.values()): if not self_part.is_equivalent(other_part, equivalencies=equivalencies): return False return True def _get_converter(self, other, equivalencies=[]): if not isinstance(other, type(self)): other = self.__class__(other, names=self) converters = [self_part._get_converter(other_part, equivalencies=equivalencies) for (self_part, other_part) in zip(self.values(), other.values())] def converter(value): if not hasattr(value, 'dtype'): value = np.array(value, self._recursively_get_dtype(value)) result = np.empty_like(value) for name, converter_ in zip(result.dtype.names, converters): result[name] = converter_(value[name]) # Index with empty tuple to decay array scalars to numpy void. return result if result.shape else result[()] return converter def to(self, other, value=np._NoValue, equivalencies=[]): """Return values converted to the specified unit. Parameters ---------- other : `~astropy.units.StructuredUnit` The unit to convert to. If necessary, will be converted to a `~astropy.units.StructuredUnit` using the dtype of ``value``. value : array-like, optional Value(s) in the current unit to be converted to the specified unit. If a sequence, the first element must have entries of the correct type to represent all elements (i.e., not have, e.g., a ``float`` where other elements have ``complex``). If not given, assumed to have 1. in all fields. equivalencies : list of tuple, optional A list of equivalence pairs to try if the units are not directly convertible. See :ref:`unit_equivalencies`. This list is in addition to possible global defaults set by, e.g., `set_enabled_equivalencies`. Use `None` to turn off all equivalencies. Returns ------- values : scalar or array Converted value(s). Raises ------ UnitsError If units are inconsistent """ if value is np._NoValue: # We do not have UNITY as a default, since then the docstring # would list 1.0 as default, yet one could not pass that in. value = UNITY return self._get_converter(other, equivalencies=equivalencies)(value) def to_string(self, format='generic'): """Output the unit in the given format as a string. Units are separated by commas. Parameters ---------- format : `astropy.units.format.Base` instance or str The name of a format or a formatter object. If not provided, defaults to the generic format. Notes ----- Structured units can be written to all formats, but can be re-read only with 'generic'. """ parts = [part.to_string(format) for part in self.values()] out_fmt = '({})' if len(self) > 1 else '({},)' if format.startswith('latex'): # Strip $ from parts and add them on the outside. parts = [part[1:-1] for part in parts] out_fmt = '$' + out_fmt + '$' return out_fmt.format(', '.join(parts)) def _repr_latex_(self): return self.to_string('latex') __array_ufunc__ = None def __mul__(self, other): if isinstance(other, str): try: other = Unit(other, parse_strict='silent') except Exception: return NotImplemented if isinstance(other, UnitBase): new_units = tuple(part * other for part in self.values()) return self.__class__(new_units, names=self) if isinstance(other, StructuredUnit): return NotImplemented # Anything not like a unit, try initialising as a structured quantity. 
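        # Schematically (illustrative, not from the test suite): with
        # ``su = Unit('(AU, AU/day)')``, an expression like ``su * (1., 2.)``
        # ends up here and is wrapped as a structured Quantity with unit
        # ``su``; anything Quantity() cannot handle gives NotImplemented.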
try: from .quantity import Quantity return Quantity(other, unit=self) except Exception: return NotImplemented def __rmul__(self, other): return self.__mul__(other) def __truediv__(self, other): if isinstance(other, str): try: other = Unit(other, parse_strict='silent') except Exception: return NotImplemented if isinstance(other, UnitBase): new_units = tuple(part / other for part in self.values()) return self.__class__(new_units, names=self) return NotImplemented def __rlshift__(self, m): try: from .quantity import Quantity return Quantity(m, self, copy=False, subok=True) except Exception: return NotImplemented def __str__(self): return self.to_string() def __repr__(self): return f'Unit("{self.to_string()}")' def __eq__(self, other): try: other = StructuredUnit(other) except Exception: return NotImplemented return self.values() == other.values() def __ne__(self, other): if not isinstance(other, type(self)): try: other = StructuredUnit(other) except Exception: return NotImplemented return self.values() != other.values() class Structure(np.void): """Single element structure for physical type IDs, etc. Behaves like a `~numpy.void` and thus mostly like a tuple which can also be indexed with field names, but overrides ``__eq__`` and ``__ne__`` to compare only the contents, not the field names. Furthermore, this way no `FutureWarning` about comparisons is given. """ # Note that it is important for physical type IDs to not be stored in a # tuple, since then the physical types would be treated as alternatives in # :meth:`~astropy.units.UnitBase.is_equivalent`. (Of course, in that # case, they could also not be indexed by name.) def __eq__(self, other): if isinstance(other, np.void): other = other.item() return self.item() == other def __ne__(self, other): if isinstance(other, np.void): other = other.item() return self.item() != other
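# An end-to-end sketch of how the pieces above combine (comments only;
# assumes ``import astropy.units as u``; the printed value is indicative,
# using 1 AU = 149597870.7 km):
#
#     >>> su = u.Unit('(AU, AU/day)')      # a StructuredUnit
#     >>> su.to('(km, km/s)', (1., 1.))    # field-by-field conversion
#     (149597870.7, 1731.45...)
#
# Internally, ``to`` builds one converter per field in ``_get_converter``,
# and ``_recursively_get_dtype`` maps the value tuple onto a structured
# dtype so each field is converted with its own unit.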
d487d964aa6b727aa64d0601d052b64bf1c831acb33887088802530fd17a7c45
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module defines the `Quantity` object, which represents a number with some associated units. `Quantity` objects support operations like ordinary numbers, but will deal with unit conversions internally. """ # STDLIB import numbers import operator import re import warnings from fractions import Fraction # THIRD PARTY import numpy as np # LOCAL from astropy import config as _config from astropy.utils.compat import NUMPY_LT_1_20, NUMPY_LT_1_22 from astropy.utils.compat.misc import override__dir__ from astropy.utils.data_info import ParentDtypeInfo from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning from astropy.utils.misc import isiterable from .core import ( Unit, UnitBase, UnitConversionError, UnitsError, UnitTypeError, dimensionless_unscaled, get_current_unit_registry) from .format.latex import Latex from .quantity_helper import can_have_arbitrary_unit, check_output, converters_and_unit from .quantity_helper.function_helpers import ( DISPATCHED_FUNCTIONS, FUNCTION_HELPERS, SUBCLASS_SAFE_FUNCTIONS, UNSUPPORTED_FUNCTIONS) from .structured import StructuredUnit from .utils import is_effectively_unity __all__ = ["Quantity", "SpecificTypeQuantity", "QuantityInfoBase", "QuantityInfo", "allclose", "isclose"] # We don't want to run doctests in the docstrings we inherit from Numpy __doctest_skip__ = ['Quantity.*'] _UNIT_NOT_INITIALISED = "(Unit not initialised)" _UFUNCS_FILTER_WARNINGS = {np.arcsin, np.arccos, np.arccosh, np.arctanh} class Conf(_config.ConfigNamespace): """ Configuration parameters for Quantity """ latex_array_threshold = _config.ConfigItem(100, 'The maximum size an array Quantity can be before its LaTeX ' 'representation for IPython gets "summarized" (meaning only the first ' 'and last few elements are shown with "..." between). Setting this to a ' 'negative number means that the value will instead be whatever numpy ' 'gets from get_printoptions.') conf = Conf() class QuantityIterator: """ Flat iterator object to iterate over Quantities A `QuantityIterator` iterator is returned by ``q.flat`` for any Quantity ``q``. It allows iterating over the array as if it were a 1-D array, either in a for-loop or by calling its `next` method. Iteration is done in C-contiguous style, with the last index varying the fastest. The iterator can also be indexed using basic slicing or advanced indexing. See Also -------- Quantity.flatten : Returns a flattened copy of an array. Notes ----- `QuantityIterator` is inspired by `~numpy.ma.core.MaskedIterator`. It is not exported by the `~astropy.units` module. Instead of instantiating a `QuantityIterator` directly, use `Quantity.flat`. """ def __init__(self, q): self._quantity = q self._dataiter = q.view(np.ndarray).flat def __iter__(self): return self def __getitem__(self, indx): out = self._dataiter.__getitem__(indx) # For single elements, ndarray.flat.__getitem__ returns scalars; these # need a new view as a Quantity. if isinstance(out, type(self._quantity)): return out else: return self._quantity._new_view(out) def __setitem__(self, index, value): self._dataiter[index] = self._quantity._to_own_unit(value) def __next__(self): """ Return the next value, or raise StopIteration. """ out = next(self._dataiter) # ndarray.flat._dataiter returns scalars, so need a view as a Quantity. 
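        # E.g., for ``q = [1., 2.] * u.m`` (a sketch; assumes
        # ``import astropy.units as u``), ``next(q.flat)`` yields the scalar
        # Quantity ``<Quantity 1. m>`` rather than a bare float.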
return self._quantity._new_view(out) next = __next__ def __len__(self): return len(self._dataiter) #### properties and methods to match `numpy.ndarray.flatiter` #### @property def base(self): """A reference to the array that is iterated over.""" return self._quantity @property def coords(self): """An N-dimensional tuple of current coordinates.""" return self._dataiter.coords @property def index(self): """Current flat index into the array.""" return self._dataiter.index def copy(self): """Get a copy of the iterator as a 1-D array.""" return self._quantity.flatten() class QuantityInfoBase(ParentDtypeInfo): # This is on a base class rather than QuantityInfo directly, so that # it can be used for EarthLocationInfo yet make clear that that class # should not be considered a typical Quantity subclass by Table. attrs_from_parent = {'dtype', 'unit'} # dtype and unit taken from parent _supports_indexing = True @staticmethod def default_format(val): return f'{val.value}' @staticmethod def possible_string_format_functions(format_): """Iterate through possible string-derived format functions. A string can either be a format specifier for the format built-in, a new-style format string, or an old-style format string. This method is overridden in order to suppress printing the unit in each row since it is already at the top in the column header. """ yield lambda format_, val: format(val.value, format_) yield lambda format_, val: format_.format(val.value) yield lambda format_, val: format_ % val.value class QuantityInfo(QuantityInfoBase): """ Container for meta information like name, description, format. This is required when the object is used as a mixin column within a table, but can be used as a general way to store meta information. """ _represent_as_dict_attrs = ('value', 'unit') _construct_from_dict_args = ['value'] _represent_as_dict_primary_data = 'value' def new_like(self, cols, length, metadata_conflicts='warn', name=None): """ Return a new Quantity instance which is consistent with the input ``cols`` and has ``length`` rows. This is intended for creating an empty column object whose elements can be set in-place for table operations like join or vstack. Parameters ---------- cols : list List of input columns length : int Length of the output column object metadata_conflicts : str ('warn'|'error'|'silent') How to handle metadata conflicts name : str Output column name Returns ------- col : `~astropy.units.Quantity` (or subclass) Empty instance of this class consistent with ``cols`` """ # Get merged info attributes like shape, dtype, format, description, etc. attrs = self.merge_cols_attributes(cols, metadata_conflicts, name, ('meta', 'format', 'description')) # Make an empty quantity using the unit of the last one. shape = (length,) + attrs.pop('shape') dtype = attrs.pop('dtype') # Use zeros so we do not get problems for Quantity subclasses such # as Longitude and Latitude, which cannot take arbitrary values. data = np.zeros(shape=shape, dtype=dtype) # Get arguments needed to reconstruct class map = {key: (data if key == 'value' else getattr(cols[-1], key)) for key in self._represent_as_dict_attrs} map['copy'] = False out = self._construct_from_dict(map) # Set remaining info attributes for attr, value in attrs.items(): setattr(out.info, attr, value) return out def get_sortable_arrays(self): """ Return a list of arrays which can be lexically sorted to represent the order of the parent column. For Quantity this is just the quantity itself. 
Returns ------- arrays : list of ndarray """ return [self._parent] class Quantity(np.ndarray): """A `~astropy.units.Quantity` represents a number with some associated unit. See also: https://docs.astropy.org/en/stable/units/quantity.html Parameters ---------- value : number, `~numpy.ndarray`, `~astropy.units.Quantity` (sequence), or str The numerical value of this quantity in the units given by unit. If a `Quantity` or sequence of them (or any other valid object with a ``unit`` attribute), creates a new `Quantity` object, converting to `unit` units as needed. If a string, it is converted to a number or `Quantity`, depending on whether a unit is present. unit : unit-like An object that represents the unit associated with the input value. Must be an `~astropy.units.UnitBase` object or a string parseable by the :mod:`~astropy.units` package. dtype : ~numpy.dtype, optional The dtype of the resulting Numpy array or scalar that will hold the value. If not provided, it is determined from the input, except that any integer and (non-Quantity) object inputs are converted to float by default. copy : bool, optional If `True` (default), then the value is copied. Otherwise, a copy will only be made if ``__array__`` returns a copy, if value is a nested sequence, or if a copy is needed to satisfy an explicitly given ``dtype``. (The `False` option is intended mostly for internal use, to speed up initialization where a copy is known to have been made. Use with care.) order : {'C', 'F', 'A'}, optional Specify the order of the array. As in `~numpy.array`. This parameter is ignored if the input is a `Quantity` and ``copy=False``. subok : bool, optional If `False` (default), the returned array will be forced to be a `Quantity`. Otherwise, `Quantity` subclasses will be passed through, or a subclass appropriate for the unit will be used (such as `~astropy.units.Dex` for ``u.dex(u.AA)``). ndmin : int, optional Specifies the minimum number of dimensions that the resulting array should have. Ones will be pre-pended to the shape as needed to meet this requirement. This parameter is ignored if the input is a `Quantity` and ``copy=False``. Raises ------ TypeError If the value provided is not a Python numeric type. TypeError If the unit provided is not either a :class:`~astropy.units.Unit` object or a parseable string unit. Notes ----- Quantities can also be created by multiplying a number or array with a :class:`~astropy.units.Unit`. See https://docs.astropy.org/en/latest/units/ Unless the ``dtype`` argument is explicitly specified, integer or (non-Quantity) object inputs are converted to `float` by default. """ # Need to set a class-level default for _equivalencies, or # Constants can not initialize properly _equivalencies = [] # Default unit for initialization; can be overridden by subclasses, # possibly to `None` to indicate there is no default unit. _default_unit = dimensionless_unscaled # Ensures views have an undefined unit. _unit = None __array_priority__ = 10000 def __class_getitem__(cls, unit_shape_dtype): """Quantity Type Hints. Unit-aware type hints are ``Annotated`` objects that encode the class, the unit, and possibly shape and dtype information, depending on the python and :mod:`numpy` versions. Schematically, ``Annotated[cls[shape, dtype], unit]`` As a classmethod, the type is the class, ie ``Quantity`` produces an ``Annotated[Quantity, ...]`` while a subclass like :class:`~astropy.coordinates.Angle` returns ``Annotated[Angle, ...]``. 
        Parameters
        ----------
        unit_shape_dtype : :class:`~astropy.units.UnitBase`, str, `~astropy.units.PhysicalType`, or tuple
            Unit specification, can be the physical type (i.e., str or class).
            If tuple, then the first element is the unit specification
            and all other elements are for `numpy.ndarray` type annotations.
            Whether they are included depends on the python and :mod:`numpy`
            versions.

        Returns
        -------
        `typing.Annotated`, `typing_extensions.Annotated`, `astropy.units.Unit`, or `astropy.units.PhysicalType`
            Return type in this preference order:
            * if python v3.9+ : `typing.Annotated`
            * if :mod:`typing_extensions` is installed : `typing_extensions.Annotated`
            * `astropy.units.Unit` or `astropy.units.PhysicalType`

        Raises
        ------
        TypeError
            If the unit/physical_type annotation is not Unit-like or
            PhysicalType-like.

        Examples
        --------
        Create a unit-aware Quantity type annotation

            >>> Quantity[Unit("s")]
            Annotated[Quantity, Unit("s")]

        See Also
        --------
        `~astropy.units.quantity_input`
            Use annotations for unit checks on function arguments and results.

        Notes
        -----
        With Python 3.9+ or :mod:`typing_extensions`, |Quantity| types are also
        static-type compatible.
        """
        # LOCAL
        from ._typing import HAS_ANNOTATED, Annotated

        # process whether [unit] or [unit, shape, ptype]
        if isinstance(unit_shape_dtype, tuple):  # unit, shape, dtype
            target = unit_shape_dtype[0]
            shape_dtype = unit_shape_dtype[1:]
        else:  # just unit
            target = unit_shape_dtype
            shape_dtype = ()

        # Allowed unit/physical types. Errors if neither.
        try:
            unit = Unit(target)
        except (TypeError, ValueError):
            from astropy.units.physical import get_physical_type

            try:
                unit = get_physical_type(target)
            except (TypeError, ValueError, KeyError):  # KeyError for Enum
                raise TypeError("unit annotation is not a Unit or PhysicalType") from None

        # Allow to sort of work for python 3.8- / no typing_extensions
        # instead of bailing out, return the unit for `quantity_input`
        if not HAS_ANNOTATED:
            warnings.warn("Quantity annotations are valid static type annotations only"
                          " if Python is v3.9+ or `typing_extensions` is installed.")
            return unit

        # Quantity does not (yet) properly extend the NumPy generics types,
        # introduced in numpy v1.22+, instead just including the unit info as
        # metadata using Annotated.
        # TODO: ensure we do interact with NDArray.__class_getitem__.
        return Annotated.__class_getitem__((cls, unit))

    def __new__(cls, value, unit=None, dtype=None, copy=True, order=None,
                subok=False, ndmin=0):

        if unit is not None:
            # convert unit first, to avoid multiple string->unit conversions
            unit = Unit(unit)

        # optimize speed for Quantity with no dtype given, copy=False
        if isinstance(value, Quantity):
            if unit is not None and unit is not value.unit:
                value = value.to(unit)
                # the above already makes a copy (with float dtype)
                copy = False

            if type(value) is not cls and not (subok and
                                               isinstance(value, cls)):
                value = value.view(cls)

            if dtype is None and value.dtype.kind in 'iu':
                dtype = float

            return np.array(value, dtype=dtype, copy=copy, order=order,
                            subok=True, ndmin=ndmin)

        # Maybe str, or list/tuple of Quantity? If so, this may set value_unit.
        # To ensure array remains fast, we short-circuit it.
        value_unit = None
        if not isinstance(value, np.ndarray):
            if isinstance(value, str):
                # The first part of the regex string matches any integer/float;
                # the second part adds possible trailing .+-, which will break
                # the float function below and ensure things like 1.2.3deg
                # will not work.
                pattern = (r'\s*[+-]?'
r'((\d+\.?\d*)|(\.\d+)|([nN][aA][nN])|' r'([iI][nN][fF]([iI][nN][iI][tT][yY]){0,1}))' r'([eE][+-]?\d+)?' r'[.+-]?') v = re.match(pattern, value) unit_string = None try: value = float(v.group()) except Exception: raise TypeError('Cannot parse "{}" as a {}. It does not ' 'start with a number.' .format(value, cls.__name__)) unit_string = v.string[v.end():].strip() if unit_string: value_unit = Unit(unit_string) if unit is None: unit = value_unit # signal no conversion needed below. elif isiterable(value) and len(value) > 0: # Iterables like lists and tuples. if all(isinstance(v, Quantity) for v in value): # If a list/tuple containing only quantities, convert all # to the same unit. if unit is None: unit = value[0].unit value = [q.to_value(unit) for q in value] value_unit = unit # signal below that conversion has been done elif (dtype is None and not hasattr(value, 'dtype') and isinstance(unit, StructuredUnit)): # Special case for list/tuple of values and a structured unit: # ``np.array(value, dtype=None)`` would treat tuples as lower # levels of the array, rather than as elements of a structured # array, so we use the structure of the unit to help infer the # structured dtype of the value. dtype = unit._recursively_get_dtype(value) if value_unit is None: # If the value has a `unit` attribute and if not None # (for Columns with uninitialized unit), treat it like a quantity. value_unit = getattr(value, 'unit', None) if value_unit is None: # Default to dimensionless for no (initialized) unit attribute. if unit is None: unit = cls._default_unit value_unit = unit # signal below that no conversion is needed else: try: value_unit = Unit(value_unit) except Exception as exc: raise TypeError("The unit attribute {!r} of the input could " "not be parsed as an astropy Unit, raising " "the following exception:\n{}" .format(value.unit, exc)) if unit is None: unit = value_unit elif unit is not value_unit: copy = False # copy will be made in conversion at end value = np.array(value, dtype=dtype, copy=copy, order=order, subok=True, ndmin=ndmin) # check that array contains numbers or long int objects if (value.dtype.kind in 'OSU' and not (value.dtype.kind == 'O' and isinstance(value.item(0), numbers.Number))): raise TypeError("The value must be a valid Python or " "Numpy numeric type.") # by default, cast any integer, boolean, etc., to float if dtype is None and value.dtype.kind in 'iuO': value = value.astype(float) # if we allow subclasses, allow a class from the unit. if subok: qcls = getattr(unit, '_quantity_class', cls) if issubclass(qcls, cls): cls = qcls value = value.view(cls) value._set_unit(value_unit) if unit is value_unit: return value else: # here we had non-Quantity input that had a "unit" attribute # with a unit different from the desired one. So, convert. return value.to(unit) def __array_finalize__(self, obj): # Check whether super().__array_finalize should be called # (sadly, ndarray.__array_finalize__ is None; we cannot be sure # what is above us). super_array_finalize = super().__array_finalize__ if super_array_finalize is not None: super_array_finalize(obj) # If we're a new object or viewing an ndarray, nothing has to be done. if obj is None or obj.__class__ is np.ndarray: return # If our unit is not set and obj has a valid one, use it. if self._unit is None: unit = getattr(obj, '_unit', None) if unit is not None: self._set_unit(unit) # Copy info if the original had `info` defined. 
Because of the way the
        # DataInfo works, `'info' in obj.__dict__` is False until the
        # `info` attribute is accessed or set.
        if 'info' in obj.__dict__:
            self.info = obj.info

    def __array_wrap__(self, obj, context=None):
        if context is None:
            # Methods like .squeeze() create a new `ndarray` and then call
            # __array_wrap__ to turn the array into self's subclass.
            return self._new_view(obj)

        raise NotImplementedError('__array_wrap__ should not be used '
                                  'with a context any more since all use '
                                  'should go through array_function. '
                                  'Please raise an issue on '
                                  'https://github.com/astropy/astropy')

    def __array_ufunc__(self, function, method, *inputs, **kwargs):
        """Wrap numpy ufuncs, taking care of units.

        Parameters
        ----------
        function : callable
            ufunc to wrap.
        method : str
            Ufunc method: ``__call__``, ``at``, ``reduce``, etc.
        inputs : tuple
            Input arrays.
        kwargs : keyword arguments
            As passed on, with ``out`` containing possible quantity output.

        Returns
        -------
        result : `~astropy.units.Quantity`
            Results of the ufunc, with the unit set properly.
        """
        # Determine required conversion functions -- to bring the unit of the
        # input to that expected (e.g., radian for np.sin), or to get
        # consistent units between two inputs (e.g., in np.add) --
        # and the unit of the result (or tuple of units for nout > 1).
        converters, unit = converters_and_unit(function, method, *inputs)

        out = kwargs.get('out', None)
        # Avoid loop back by turning any Quantity output into array views.
        if out is not None:
            # If pre-allocated output is used, check it is suitable.
            # This also returns array view, to ensure we don't loop back.
            if function.nout == 1:
                out = out[0]
            out_array = check_output(out, unit, inputs, function=function)
            # Ensure output argument remains a tuple.
            kwargs['out'] = (out_array,) if function.nout == 1 else out_array

        # Same for inputs, but here also convert if necessary.
        arrays = []
        for input_, converter in zip(inputs, converters):
            input_ = getattr(input_, 'value', input_)
            arrays.append(converter(input_) if converter else input_)

        # Call our superclass's __array_ufunc__
        result = super().__array_ufunc__(function, method, *arrays, **kwargs)

        # If unit is None, a plain array is expected (e.g., comparisons), which
        # means we're done.
        # We're also done if the result was None (for method 'at') or
        # NotImplemented, which can happen if other inputs/outputs override
        # __array_ufunc__; hopefully, they can then deal with us.
        if unit is None or result is None or result is NotImplemented:
            return result

        return self._result_as_quantity(result, unit, out)

    def _result_as_quantity(self, result, unit, out):
        """Turn result into a quantity with the given unit.

        If no output is given, it will take a view of the array as a quantity,
        and set the unit.  If output is given, those should be quantity views
        of the result arrays, and the function will just set the unit.

        Parameters
        ----------
        result : ndarray or tuple thereof
            Array(s) which need to be turned into quantity.
        unit : `~astropy.units.Unit`
            Unit for the quantities to be returned (or `None` if the result
            should not be a quantity).  Should be tuple if result is a tuple.
        out : `~astropy.units.Quantity` or None
            Possible output quantity.  Should be `None` or a tuple if result
            is a tuple.

        Returns
        -------
        out : `~astropy.units.Quantity`
            With units set.
        """
        if isinstance(result, (tuple, list)):
            if out is None:
                out = (None,) * len(result)
            return result.__class__(
                self._result_as_quantity(result_, unit_, out_)
                for (result_, unit_, out_) in zip(result, unit, out))

        if out is None:
            # View the result array as a Quantity with the proper unit.
            return result if unit is None else self._new_view(result, unit)

        # For given output, just set the unit. We know the unit is not None and
        # the output is of the correct Quantity subclass, as it was passed
        # through check_output.
        out._set_unit(unit)
        return out

    def __quantity_subclass__(self, unit):
        """
        Overridden by subclasses to change what kind of view is
        created based on the output unit of an operation.

        Parameters
        ----------
        unit : UnitBase
            The unit for which the appropriate class should be returned

        Returns
        -------
        tuple :
            - `~astropy.units.Quantity` subclass
            - bool: True if subclasses of the given class are ok
        """
        return Quantity, True

    def _new_view(self, obj=None, unit=None):
        """
        Create a Quantity view of some array-like input, and set the unit

        By default, return a view of ``obj`` of the same class as ``self`` and
        with the same unit.  Subclasses can override the type of class for a
        given unit using ``__quantity_subclass__``, and can ensure properties
        other than the unit are copied using ``__array_finalize__``.

        If the given unit defines a ``_quantity_class`` of which ``self``
        is not an instance, a view using this class is taken.

        Parameters
        ----------
        obj : ndarray or scalar, optional
            The array to create a view of.  If obj is a numpy or python scalar,
            it will be converted to an array scalar.  By default, ``self``
            is converted.

        unit : unit-like, optional
            The unit of the resulting object.  It is used to select a
            subclass, and explicitly assigned to the view if given.
            If not given, the subclass and unit will be that of ``self``.

        Returns
        -------
        view : `~astropy.units.Quantity` subclass
        """
        # Determine the unit and quantity subclass that we need for the view.
        if unit is None:
            unit = self.unit
            quantity_subclass = self.__class__
        elif unit is self.unit and self.__class__ is Quantity:
            # The second part is because we should not presume what other
            # classes want to do for the same unit.  E.g., Constant will
            # always want to fall back to Quantity, and relies on going
            # through `__quantity_subclass__`.
            quantity_subclass = Quantity
        else:
            unit = Unit(unit)
            quantity_subclass = getattr(unit, '_quantity_class', Quantity)
            if isinstance(self, quantity_subclass):
                quantity_subclass, subok = self.__quantity_subclass__(unit)
                if subok:
                    quantity_subclass = self.__class__

        # We only want to propagate information from ``self`` to our new view,
        # so obj should be a regular array. By using ``np.array``, we also
        # convert python and numpy scalars, which cannot be viewed as arrays
        # and thus not as Quantity either, to zero-dimensional arrays.
        # (These are turned back into scalar in `.value`)
        # Note that for an ndarray input, the np.array call takes only about
        # double the time of ``obj.__class__ is np.ndarray``.  So, not worth
        # special-casing.
        if obj is None:
            obj = self.view(np.ndarray)
        else:
            obj = np.array(obj, copy=False, subok=True)

        # Take the view, set the unit, and update possible other properties
        # such as ``info``, ``wrap_angle`` in `Longitude`, etc.
        view = obj.view(quantity_subclass)
        view._set_unit(unit)
        view.__array_finalize__(self)
        return view

    def _set_unit(self, unit):
        """Set the unit.

        This is used anywhere the unit is set or modified, i.e., in the
        initializer, in ``__imul__`` and ``__itruediv__`` for in-place
        multiplication and division by another unit, as well as in
        ``__array_finalize__`` for wrapping up views.  For Quantity, it just
        sets the unit, but subclasses can override it to check that, e.g.,
        a unit is consistent.
        """
        if not isinstance(unit, UnitBase):
            if (isinstance(self._unit, StructuredUnit)
                    or isinstance(unit, StructuredUnit)):
                unit = StructuredUnit(unit, self.dtype)
            else:
                # Trying to go through a string ensures that, e.g., Magnitudes with
                # dimensionless physical unit become Quantity with units of mag.
                unit = Unit(str(unit), parse_strict='silent')

                if not isinstance(unit, (UnitBase, StructuredUnit)):
                    raise UnitTypeError(
                        "{} instances require normal units, not {} instances."
                        .format(type(self).__name__, type(unit)))

        self._unit = unit

    def __deepcopy__(self, memo):
        # If we don't define this, ``copy.deepcopy(quantity)`` will
        # return a bare Numpy array.
        return self.copy()

    def __reduce__(self):
        # patch to pickle Quantity objects (ndarray subclasses), see
        # http://www.mail-archive.com/[email protected]/msg02446.html
        object_state = list(super().__reduce__())
        object_state[2] = (object_state[2], self.__dict__)
        return tuple(object_state)

    def __setstate__(self, state):
        # patch to unpickle Quantity objects (ndarray subclasses), see
        # http://www.mail-archive.com/[email protected]/msg02446.html
        nd_state, own_state = state
        super().__setstate__(nd_state)
        self.__dict__.update(own_state)

    info = QuantityInfo()

    def _to_value(self, unit, equivalencies=[]):
        """Helper method for to and to_value."""
        if equivalencies == []:
            equivalencies = self._equivalencies
        if not self.dtype.names or isinstance(self.unit, StructuredUnit):
            # Standard path, let the unit do the work.
            return self.unit.to(unit, self.view(np.ndarray),
                                equivalencies=equivalencies)

        else:
            # The .to() method of a simple unit cannot convert a structured
            # dtype, so we work around it, by recursing.
            # TODO: deprecate this?
            # Convert simple to Structured on initialization?
            result = np.empty_like(self.view(np.ndarray))
            for name in self.dtype.names:
                result[name] = self[name]._to_value(unit, equivalencies)
            return result

    def to(self, unit, equivalencies=[], copy=True):
        """
        Return a new `~astropy.units.Quantity` object with the specified unit.

        Parameters
        ----------
        unit : unit-like
            An object that represents the unit to convert to. Must be
            an `~astropy.units.UnitBase` object or a string parseable
            by the `~astropy.units` package.

        equivalencies : list of tuple
            A list of equivalence pairs to try if the units are not
            directly convertible.  See :ref:`astropy:unit_equivalencies`.
            If not provided or ``[]``, class default equivalencies will be used
            (none for `~astropy.units.Quantity`, but may be set for subclasses).
            If `None`, no equivalencies will be applied at all, not even any
            set globally or within a context.

        copy : bool, optional
            If `True` (default), then the value is copied.  Otherwise, a copy
            will only be made if necessary.

        See also
        --------
        to_value : get the numerical value in a given unit.
        """
        # We don't use `to_value` below since we always want to make a copy
        # and don't want to slow down this method (esp. the scalar case).
        unit = Unit(unit)
        if copy:
            # Avoid using to_value to ensure that we make a copy. We also
            # don't want to slow down this method (esp. the scalar case).
            value = self._to_value(unit, equivalencies)
        else:
            # to_value only copies if necessary
            value = self.to_value(unit, equivalencies)
        return self._new_view(value, unit)

    def to_value(self, unit=None, equivalencies=[]):
        """
        The numerical value, possibly in a different unit.

        Parameters
        ----------
        unit : unit-like, optional
            The unit in which the value should be given. If not given or `None`,
            use the current unit.

        equivalencies : list of tuple, optional
            A list of equivalence pairs to try if the units are not directly
            convertible (see :ref:`astropy:unit_equivalencies`). If not provided
            or ``[]``, class default equivalencies will be used (none for
            `~astropy.units.Quantity`, but may be set for subclasses).
            If `None`, no equivalencies will be applied at all, not even any
            set globally or within a context.

        Returns
        -------
        value : ndarray or scalar
            The value in the units specified. For arrays, this will be a view
            of the data if no unit conversion was necessary.

        See also
        --------
        to : Get a new instance in a different unit.
        """
        if unit is None or unit is self.unit:
            value = self.view(np.ndarray)
        elif not self.dtype.names:
            # For non-structured, we attempt a short-cut, where we just get
            # the scale.  If that is 1, we do not have to do anything.
            unit = Unit(unit)
            # We want a view if the unit does not change.  One could check
            # with "==", but that calculates the scale that we need anyway.
            # TODO: would be better for `unit.to` to have an in-place flag.
            try:
                scale = self.unit._to(unit)
            except Exception:
                # Short-cut failed; try default (maybe equivalencies help).
                value = self._to_value(unit, equivalencies)
            else:
                value = self.view(np.ndarray)
                if not is_effectively_unity(scale):
                    # not in-place!
                    value = value * scale

        else:
            # For structured arrays, we go the default route.
            value = self._to_value(unit, equivalencies)

        # Index with empty tuple to decay array scalars into numpy scalars.
        return value if value.shape else value[()]

    value = property(to_value,
                     doc="""The numerical value of this instance.

    See also
    --------
    to_value : Get the numerical value in a given unit.
    """)

    @property
    def unit(self):
        """
        A `~astropy.units.UnitBase` object representing the unit of this
        quantity.
        """

        return self._unit

    @property
    def equivalencies(self):
        """
        A list of equivalencies that will be applied by default during
        unit conversions.
        """

        return self._equivalencies

    def _recursively_apply(self, func):
        """Apply function recursively to every field.

        Returns a copy with the result.
        """
        result = np.empty_like(self)
        result_value = result.view(np.ndarray)
        result_unit = ()
        for name in self.dtype.names:
            part = func(self[name])
            result_value[name] = part.value
            result_unit += (part.unit,)

        result._set_unit(result_unit)
        return result

    @property
    def si(self):
        """
        Returns a copy of the current `Quantity` instance with SI units. The
        value of the resulting object will be scaled.
        """
        if self.dtype.names:
            return self._recursively_apply(operator.attrgetter('si'))
        si_unit = self.unit.si
        return self._new_view(self.value * si_unit.scale,
                              si_unit / si_unit.scale)

    @property
    def cgs(self):
        """
        Returns a copy of the current `Quantity` instance with CGS units. The
        value of the resulting object will be scaled.
        """
        if self.dtype.names:
            return self._recursively_apply(operator.attrgetter('cgs'))
        cgs_unit = self.unit.cgs
        return self._new_view(self.value * cgs_unit.scale,
                              cgs_unit / cgs_unit.scale)

    @property
    def isscalar(self):
        """
        True if the `value` of this quantity is a scalar, or False if it
        is an array-like object.

        ..
note:: This is subtly different from `numpy.isscalar` in that `numpy.isscalar` returns False for a zero-dimensional array (e.g. ``np.array(1)``), while this is True for quantities, since quantities cannot represent true numpy scalars. """ return not self.shape # This flag controls whether convenience conversion members, such # as `q.m` equivalent to `q.to_value(u.m)` are available. This is # not turned on on Quantity itself, but is on some subclasses of # Quantity, such as `astropy.coordinates.Angle`. _include_easy_conversion_members = False @override__dir__ def __dir__(self): """ Quantities are able to directly convert to other units that have the same physical type. This function is implemented in order to make autocompletion still work correctly in IPython. """ if not self._include_easy_conversion_members: return [] extra_members = set() equivalencies = Unit._normalize_equivalencies(self.equivalencies) for equivalent in self.unit._get_units_with_same_physical_type( equivalencies): extra_members.update(equivalent.names) return extra_members def __getattr__(self, attr): """ Quantities are able to directly convert to other units that have the same physical type. """ if not self._include_easy_conversion_members: raise AttributeError( f"'{self.__class__.__name__}' object has no '{attr}' member") def get_virtual_unit_attribute(): registry = get_current_unit_registry().registry to_unit = registry.get(attr, None) if to_unit is None: return None try: return self.unit.to( to_unit, self.value, equivalencies=self.equivalencies) except UnitsError: return None value = get_virtual_unit_attribute() if value is None: raise AttributeError( f"{self.__class__.__name__} instance has no attribute '{attr}'") else: return value # Equality needs to be handled explicitly as ndarray.__eq__ gives # DeprecationWarnings on any error, which is distracting, and does not # deal well with structured arrays (nor does the ufunc). def __eq__(self, other): try: other_value = self._to_own_unit(other) except UnitsError: return False except Exception: return NotImplemented return self.value.__eq__(other_value) def __ne__(self, other): try: other_value = self._to_own_unit(other) except UnitsError: return True except Exception: return NotImplemented return self.value.__ne__(other_value) # Unit conversion operator (<<). def __lshift__(self, other): try: other = Unit(other, parse_strict='silent') except UnitTypeError: return NotImplemented return self.__class__(self, other, copy=False, subok=True) def __ilshift__(self, other): try: other = Unit(other, parse_strict='silent') except UnitTypeError: return NotImplemented try: factor = self.unit._to(other) except Exception: # Maybe via equivalencies? Now we do make a temporary copy. try: value = self._to_value(other) except UnitConversionError: return NotImplemented self.view(np.ndarray)[...] = value else: self.view(np.ndarray)[...] *= factor self._set_unit(other) return self def __rlshift__(self, other): if not self.isscalar: return NotImplemented return Unit(self).__rlshift__(other) # Give warning for other >> self, since probably other << self was meant. def __rrshift__(self, other): warnings.warn(">> is not implemented. Did you mean to convert " "something to this quantity as a unit using '<<'?", AstropyWarning) return NotImplemented # Also define __rshift__ and __irshift__ so we override default ndarray # behaviour, but instead of emitting a warning here, let it be done by # other (which likely is a unit if this was a mistake). 
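    # For reference, a sketch of the conversion operator handled above
    # (comments only; the repr shown is indicative):
    #
    #     >>> q = Quantity(1000., 'm')
    #     >>> q <<= 'km'       # __ilshift__ rescales in place when it can
    #     >>> q
    #     <Quantity 1. km>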
def __rshift__(self, other): return NotImplemented def __irshift__(self, other): return NotImplemented # Arithmetic operations def __mul__(self, other): """ Multiplication between `Quantity` objects and other objects.""" if isinstance(other, (UnitBase, str)): try: return self._new_view(self.copy(), other * self.unit) except UnitsError: # let other try to deal with it return NotImplemented return super().__mul__(other) def __imul__(self, other): """In-place multiplication between `Quantity` objects and others.""" if isinstance(other, (UnitBase, str)): self._set_unit(other * self.unit) return self return super().__imul__(other) def __rmul__(self, other): """ Right Multiplication between `Quantity` objects and other objects. """ return self.__mul__(other) def __truediv__(self, other): """ Division between `Quantity` objects and other objects.""" if isinstance(other, (UnitBase, str)): try: return self._new_view(self.copy(), self.unit / other) except UnitsError: # let other try to deal with it return NotImplemented return super().__truediv__(other) def __itruediv__(self, other): """Inplace division between `Quantity` objects and other objects.""" if isinstance(other, (UnitBase, str)): self._set_unit(self.unit / other) return self return super().__itruediv__(other) def __rtruediv__(self, other): """ Right Division between `Quantity` objects and other objects.""" if isinstance(other, (UnitBase, str)): return self._new_view(1. / self.value, other / self.unit) return super().__rtruediv__(other) def __pow__(self, other): if isinstance(other, Fraction): # Avoid getting object arrays by raising the value to a Fraction. return self._new_view(self.value ** float(other), self.unit ** other) return super().__pow__(other) # other overrides of special functions def __hash__(self): return hash(self.value) ^ hash(self.unit) def __iter__(self): if self.isscalar: raise TypeError( "'{cls}' object with a scalar value is not iterable" .format(cls=self.__class__.__name__)) # Otherwise return a generator def quantity_iter(): for val in self.value: yield self._new_view(val) return quantity_iter() def __getitem__(self, key): if isinstance(key, str) and isinstance(self.unit, StructuredUnit): return self._new_view(self.view(np.ndarray)[key], self.unit[key]) try: out = super().__getitem__(key) except IndexError: # We want zero-dimensional Quantity objects to behave like scalars, # so they should raise a TypeError rather than an IndexError. if self.isscalar: raise TypeError( "'{cls}' object with a scalar value does not support " "indexing".format(cls=self.__class__.__name__)) else: raise # For single elements, ndarray.__getitem__ returns scalars; these # need a new view as a Quantity. if not isinstance(out, np.ndarray): out = self._new_view(out) return out def __setitem__(self, i, value): if isinstance(i, str): # Indexing will cause a different unit, so by doing this in # two steps we effectively try with the right unit. self[i][...] = value return # update indices in info if the info property has been accessed # (in which case 'info' in self.__dict__ is True; this is guaranteed # to be the case if we're part of a table). if not self.isscalar and 'info' in self.__dict__: self.info.adjust_indices(i, value, len(self)) self.view(np.ndarray).__setitem__(i, self._to_own_unit(value)) # __contains__ is OK def __bool__(self): """Quantities should always be treated as non-False; there is too much potential for ambiguity otherwise. """ warnings.warn('The truth value of a Quantity is ambiguous. 
' 'In the future this will raise a ValueError.', AstropyDeprecationWarning) return True def __len__(self): if self.isscalar: raise TypeError("'{cls}' object with a scalar value has no " "len()".format(cls=self.__class__.__name__)) else: return len(self.value) # Numerical types def __float__(self): try: return float(self.to_value(dimensionless_unscaled)) except (UnitsError, TypeError): raise TypeError('only dimensionless scalar quantities can be ' 'converted to Python scalars') def __int__(self): try: return int(self.to_value(dimensionless_unscaled)) except (UnitsError, TypeError): raise TypeError('only dimensionless scalar quantities can be ' 'converted to Python scalars') def __index__(self): # for indices, we do not want to mess around with scaling at all, # so unlike for float, int, we insist here on unscaled dimensionless try: assert self.unit.is_unity() return self.value.__index__() except Exception: raise TypeError('only integer dimensionless scalar quantities ' 'can be converted to a Python index') # TODO: we may want to add a hook for dimensionless quantities? @property def _unitstr(self): if self.unit is None: unitstr = _UNIT_NOT_INITIALISED else: unitstr = str(self.unit) if unitstr: unitstr = ' ' + unitstr return unitstr def to_string(self, unit=None, precision=None, format=None, subfmt=None): """ Generate a string representation of the quantity and its unit. The behavior of this function can be altered via the `numpy.set_printoptions` function and its various keywords. The exception to this is the ``threshold`` keyword, which is controlled via the ``[units.quantity]`` configuration item ``latex_array_threshold``. This is treated separately because the numpy default of 1000 is too big for most browsers to handle. Parameters ---------- unit : unit-like, optional Specifies the unit. If not provided, the unit used to initialize the quantity will be used. precision : number, optional The level of decimal precision. If `None`, or not provided, it will be determined from NumPy print options. format : str, optional The format of the result. If not provided, an unadorned string is returned. Supported values are: - 'latex': Return a LaTeX-formatted string - 'latex_inline': Return a LaTeX-formatted string that uses negative exponents instead of fractions subfmt : str, optional Subformat of the result. For the moment, only used for ``format='latex'`` and ``format='latex_inline'``. Supported values are: - 'inline': Use ``$ ... $`` as delimiters. - 'display': Use ``$\\displaystyle ... $`` as delimiters. Returns ------- str A string with the contents of this Quantity """ if unit is not None and unit != self.unit: return self.to(unit).to_string( unit=None, precision=precision, format=format, subfmt=subfmt) formats = { None: None, "latex": { None: ("$", "$"), "inline": ("$", "$"), "display": (r"$\displaystyle ", r"$"), }, } formats['latex_inline'] = formats['latex'] if format not in formats: raise ValueError(f"Unknown format '{format}'") elif format is None: if precision is None: # Use default formatting settings return f'{self.value}{self._unitstr:s}' else: # np.array2string properly formats arrays as well as scalars return np.array2string(self.value, precision=precision, floatmode="fixed") + self._unitstr # else, for the moment we assume format="latex" or "latex_inline". 
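        # E.g. (indicative): Quantity(1.2345e8, 'm').to_string(format='latex')
        # comes out roughly as '$1.2345 \times 10^{8} \; \mathrm{m}$'.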
        # Set the precision if set, otherwise use numpy default
        pops = np.get_printoptions()
        format_spec = f".{precision if precision is not None else pops['precision']}g"

        def float_formatter(value):
            return Latex.format_exponential_notation(value,
                                                     format_spec=format_spec)

        def complex_formatter(value):
            return '({}{}i)'.format(
                Latex.format_exponential_notation(value.real,
                                                  format_spec=format_spec),
                Latex.format_exponential_notation(value.imag,
                                                  format_spec='+' + format_spec))

        # The view is needed for the scalar case - self.value might be float.
        latex_value = np.array2string(
            self.view(np.ndarray),
            threshold=(conf.latex_array_threshold
                       if conf.latex_array_threshold > -1
                       else pops['threshold']),
            formatter={'float_kind': float_formatter,
                       'complex_kind': complex_formatter},
            max_line_width=np.inf,
            separator=',~')

        latex_value = latex_value.replace('...', r'\dots')

        # Format unit
        # [1:-1] strips the '$' on either side needed for math mode
        if self.unit is None:
            latex_unit = _UNIT_NOT_INITIALISED
        elif format == 'latex':
            latex_unit = self.unit._repr_latex_()[1:-1]  # note this is unicode
        elif format == 'latex_inline':
            latex_unit = self.unit.to_string(format='latex_inline')[1:-1]

        delimiter_left, delimiter_right = formats[format][subfmt]

        return rf'{delimiter_left}{latex_value} \; {latex_unit}{delimiter_right}'

    def __str__(self):
        return self.to_string()

    def __repr__(self):
        prefixstr = '<' + self.__class__.__name__ + ' '
        arrstr = np.array2string(self.view(np.ndarray), separator=', ',
                                 prefix=prefixstr)
        return f'{prefixstr}{arrstr}{self._unitstr:s}>'

    def _repr_latex_(self):
        """
        Generate a latex representation of the quantity and its unit.

        Returns
        -------
        lstr
            A LaTeX string with the contents of this Quantity
        """
        # NOTE: This should change to display format in a future release
        return self.to_string(format='latex', subfmt='inline')

    def __format__(self, format_spec):
        """
        Format quantities using the new-style python formatting codes
        as specifiers for the number.

        If the format specifier correctly applies itself to the value,
        then it is used to format only the value. If it cannot be
        applied to the value, then it is applied to the whole string.

        """
        try:
            value = format(self.value, format_spec)
            full_format_spec = "s"
        except ValueError:
            value = self.value
            full_format_spec = format_spec

        return format(f"{value}{self._unitstr:s}",
                      full_format_spec)

    def decompose(self, bases=[]):
        """
        Generates a new `Quantity` with the units
        decomposed. Decomposed units have only irreducible units in
        them (see `astropy.units.UnitBase.decompose`).

        Parameters
        ----------
        bases : sequence of `~astropy.units.UnitBase`, optional
            The bases to decompose into.  When not provided,
            decomposes down to any irreducible units.  When provided,
            the decomposed result will only contain the given units.
            This will raise a `~astropy.units.UnitsError` if it's not possible
            to do so.

        Returns
        -------
        newq : `~astropy.units.Quantity`
            A new object equal to this quantity with units decomposed.
        """
        return self._decompose(False, bases=bases)

    def _decompose(self, allowscaledunits=False, bases=[]):
        """
        Generates a new `Quantity` with the units decomposed. Decomposed
        units have only irreducible units in them (see
        `astropy.units.UnitBase.decompose`).

        Parameters
        ----------
        allowscaledunits : bool
            If True, the resulting `Quantity` may have a scale factor
            associated with it.  If False, any scaling in the unit will
            be subsumed into the value of the resulting `Quantity`

        bases : sequence of UnitBase, optional
            The bases to decompose into.  When not provided,
            decomposes down to any irreducible units.  When provided,
            the decomposed result will only contain the given units.
            This will raise a `~astropy.units.UnitsError` if it's not possible
            to do so.

        Returns
        -------
        newq : `~astropy.units.Quantity`
            A new object equal to this quantity with units decomposed.

        """
        new_unit = self.unit.decompose(bases=bases)

        # Be careful here because self.value usually is a view of self;
        # be sure that the original value is not being modified.
        if not allowscaledunits and hasattr(new_unit, 'scale'):
            new_value = self.value * new_unit.scale
            new_unit = new_unit / new_unit.scale
            return self._new_view(new_value, new_unit)
        else:
            return self._new_view(self.copy(), new_unit)

    # These functions need to be overridden to take into account the units
    # Array conversion
    # https://numpy.org/doc/stable/reference/arrays.ndarray.html#array-conversion

    def item(self, *args):
        """Copy an element of an array to a scalar Quantity and return it.

        Like :meth:`~numpy.ndarray.item` except that it always
        returns a `Quantity`, not a Python scalar.

        """
        return self._new_view(super().item(*args))

    def tolist(self):
        raise NotImplementedError("cannot make a list of Quantities. Get "
                                  "list of values with q.value.tolist()")

    def _to_own_unit(self, value, check_precision=True):
        try:
            _value = value.to_value(self.unit)
        except AttributeError:
            # We're not a Quantity.
            # First remove two special cases (with a fast test):
            # 1) Maybe masked printing? MaskedArray with quantities does not
            # work very well, but no reason to break even repr and str.
            # 2) np.ma.masked? useful if we're a MaskedQuantity.
            if (value is np.ma.masked
                    or (value is np.ma.masked_print_option
                        and self.dtype.kind == 'O')):
                return value
            # Now, let's try a more general conversion.
            # Plain arrays will be converted to dimensionless in the process,
            # but anything with a unit attribute will use that.
            try:
                as_quantity = Quantity(value)
                _value = as_quantity.to_value(self.unit)
            except UnitsError:
                # last chance: if this was not something with a unit
                # and is all 0, inf, or nan, we treat it as arbitrary unit.
                if (not hasattr(value, 'unit')
                        and can_have_arbitrary_unit(as_quantity.value)):
                    _value = as_quantity.value
                else:
                    raise

        if self.dtype.kind == 'i' and check_precision:
            # If, e.g., we are casting float to int, we want to fail if
            # precision is lost, but let things pass if it works.
            _value = np.array(_value, copy=False, subok=True)
            if not np.can_cast(_value.dtype, self.dtype):
                self_dtype_array = np.array(_value, self.dtype, subok=True)
                if not np.all(np.logical_or(self_dtype_array == _value,
                                            np.isnan(_value))):
                    raise TypeError("cannot convert value type to array type "
                                    "without precision loss")

        # Setting names to ensure things like equality work (note that
        # above will have failed already if units did not match).
        if self.dtype.names:
            _value.dtype.names = self.dtype.names
        return _value

    def itemset(self, *args):
        if len(args) == 0:
            raise ValueError("itemset must have at least one argument")

        self.view(np.ndarray).itemset(*(args[:-1] +
                                        (self._to_own_unit(args[-1]),)))

    def tostring(self, order='C'):
        raise NotImplementedError("cannot write Quantities to string. Write "
                                  "array with q.value.tostring(...).")

    def tobytes(self, order='C'):
        raise NotImplementedError("cannot write Quantities to string. Write "
                                  "array with q.value.tobytes(...).")

    def tofile(self, fid, sep="", format="%s"):
        raise NotImplementedError("cannot write Quantities to file. 
Write " "array with q.value.tofile(...)") def dump(self, file): raise NotImplementedError("cannot dump Quantities to file. Write " "array with q.value.dump()") def dumps(self): raise NotImplementedError("cannot dump Quantities to string. Write " "array with q.value.dumps()") # astype, byteswap, copy, view, getfield, setflags OK as is def fill(self, value): self.view(np.ndarray).fill(self._to_own_unit(value)) # Shape manipulation: resize cannot be done (does not own data), but # shape, transpose, swapaxes, flatten, ravel, squeeze all OK. Only # the flat iterator needs to be overwritten, otherwise single items are # returned as numbers. @property def flat(self): """A 1-D iterator over the Quantity array. This returns a ``QuantityIterator`` instance, which behaves the same as the `~numpy.flatiter` instance returned by `~numpy.ndarray.flat`, and is similar to, but not a subclass of, Python's built-in iterator object. """ return QuantityIterator(self) @flat.setter def flat(self, value): y = self.ravel() y[:] = value # Item selection and manipulation # repeat, sort, compress, diagonal OK def take(self, indices, axis=None, out=None, mode='raise'): out = super().take(indices, axis=axis, out=out, mode=mode) # For single elements, ndarray.take returns scalars; these # need a new view as a Quantity. if type(out) is not type(self): out = self._new_view(out) return out def put(self, indices, values, mode='raise'): self.view(np.ndarray).put(indices, self._to_own_unit(values), mode) def choose(self, choices, out=None, mode='raise'): raise NotImplementedError("cannot choose based on quantity. Choose " "using array with q.value.choose(...)") # ensure we do not return indices as quantities def argsort(self, axis=-1, kind='quicksort', order=None): return self.view(np.ndarray).argsort(axis=axis, kind=kind, order=order) def searchsorted(self, v, *args, **kwargs): return np.searchsorted(np.array(self), self._to_own_unit(v, check_precision=False), *args, **kwargs) # avoid numpy 1.6 problem def argmax(self, axis=None, out=None): return self.view(np.ndarray).argmax(axis, out=out) def argmin(self, axis=None, out=None): return self.view(np.ndarray).argmin(axis, out=out) def __array_function__(self, function, types, args, kwargs): """Wrap numpy functions, taking care of units. Parameters ---------- function : callable Numpy function to wrap types : iterable of classes Classes that provide an ``__array_function__`` override. Can in principle be used to interact with other classes. Below, mostly passed on to `~numpy.ndarray`, which can only interact with subclasses. args : tuple Positional arguments provided in the function call. kwargs : dict Keyword arguments provided in the function call. Returns ------- result: `~astropy.units.Quantity`, `~numpy.ndarray` As appropriate for the function. If the function is not supported, `NotImplemented` is returned, which will lead to a `TypeError` unless another argument overrode the function. Raises ------ ~astropy.units.UnitsError If operands have incompatible units. """ # A function should be in one of the following sets or dicts: # 1. SUBCLASS_SAFE_FUNCTIONS (set), if the numpy implementation # supports Quantity; we pass on to ndarray.__array_function__. # 2. FUNCTION_HELPERS (dict), if the numpy implementation is usable # after converting quantities to arrays with suitable units, # and possibly setting units on the result. # 3. DISPATCHED_FUNCTIONS (dict), if the function makes sense but # requires a Quantity-specific implementation. # 4. 
    def __array_function__(self, function, types, args, kwargs):
        """Wrap numpy functions, taking care of units.

        Parameters
        ----------
        function : callable
            Numpy function to wrap
        types : iterable of classes
            Classes that provide an ``__array_function__`` override. Can
            in principle be used to interact with other classes. Below,
            mostly passed on to `~numpy.ndarray`, which can only interact
            with subclasses.
        args : tuple
            Positional arguments provided in the function call.
        kwargs : dict
            Keyword arguments provided in the function call.

        Returns
        -------
        result : `~astropy.units.Quantity`, `~numpy.ndarray`
            As appropriate for the function.  If the function is not
            supported, `NotImplemented` is returned, which will lead to
            a `TypeError` unless another argument overrode the function.

        Raises
        ------
        ~astropy.units.UnitsError
            If operands have incompatible units.
        """
        # A function should be in one of the following sets or dicts:
        # 1. SUBCLASS_SAFE_FUNCTIONS (set), if the numpy implementation
        #    supports Quantity; we pass on to ndarray.__array_function__.
        # 2. FUNCTION_HELPERS (dict), if the numpy implementation is usable
        #    after converting quantities to arrays with suitable units,
        #    and possibly setting units on the result.
        # 3. DISPATCHED_FUNCTIONS (dict), if the function makes sense but
        #    requires a Quantity-specific implementation.
        # 4. UNSUPPORTED_FUNCTIONS (set), if the function does not make sense.
        # For now, since we may not yet have complete coverage, if a
        # function is in none of the above, we simply call the numpy
        # implementation.
        if function in SUBCLASS_SAFE_FUNCTIONS:
            return super().__array_function__(function, types, args, kwargs)

        elif function in FUNCTION_HELPERS:
            function_helper = FUNCTION_HELPERS[function]
            try:
                args, kwargs, unit, out = function_helper(*args, **kwargs)
            except NotImplementedError:
                return self._not_implemented_or_raise(function, types)

            result = super().__array_function__(function, types, args, kwargs)
            # Fall through to return section

        elif function in DISPATCHED_FUNCTIONS:
            dispatched_function = DISPATCHED_FUNCTIONS[function]
            try:
                result, unit, out = dispatched_function(*args, **kwargs)
            except NotImplementedError:
                return self._not_implemented_or_raise(function, types)

            # Fall through to return section

        elif function in UNSUPPORTED_FUNCTIONS:
            return NotImplemented

        else:
            warnings.warn("function '{}' is not known to astropy's Quantity. "
                          "Will run it anyway, hoping it will treat ndarray "
                          "subclasses correctly. Please raise an issue at "
                          "https://github.com/astropy/astropy/issues. "
                          .format(function.__name__), AstropyWarning)
            return super().__array_function__(function, types, args, kwargs)

        # If unit is None, a plain array is expected (e.g., boolean), which
        # means we're done.
        # We're also done if the result was NotImplemented, which can happen
        # if other inputs/outputs override __array_function__;
        # hopefully, they can then deal with us.
        if unit is None or result is NotImplemented:
            return result

        return self._result_as_quantity(result, unit, out=out)

    def _not_implemented_or_raise(self, function, types):
        # Our function helper or dispatcher found that the function does not
        # work with Quantity.  In principle, there may be another class that
        # knows what to do with us, for which we should return NotImplemented.
        # But if there is ndarray (or a non-Quantity subclass of it) around,
        # it quite likely coerces, so we should just break.
        if any(issubclass(t, np.ndarray) and not issubclass(t, Quantity)
               for t in types):
            raise TypeError("the Quantity implementation cannot handle {} "
                            "with the given arguments."
                            .format(function)) from None
        else:
            return NotImplemented
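    # Editor's note (added example, not part of the original module): a short
    # illustration of the dispatch above, assuming a standard astropy
    # installation.  A helped function such as np.concatenate converts its
    # arguments to a common unit and re-attaches it to the result:
    #
    #     >>> import numpy as np
    #     >>> import astropy.units as u
    #     >>> np.concatenate([[1., 2.] * u.m, [30.] * u.cm])
    #     <Quantity [1. , 2. , 0.3] m>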
""" if unit is None: unit = self.unit # Ensure we don't loop back by turning any Quantity into array views. args = (self.value,) + tuple((arg.value if isinstance(arg, Quantity) else arg) for arg in args) if out is not None: # If pre-allocated output is used, check it is suitable. # This also returns array view, to ensure we don't loop back. arrays = tuple(arg for arg in args if isinstance(arg, np.ndarray)) kwargs['out'] = check_output(out, unit, arrays, function=function) # Apply the function and turn it back into a Quantity. result = function(*args, **kwargs) return self._result_as_quantity(result, unit, out) def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None): return self._wrap_function(np.trace, offset, axis1, axis2, dtype, out=out) if NUMPY_LT_1_20: def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False): return self._wrap_function(np.var, axis, dtype, out=out, ddof=ddof, keepdims=keepdims, unit=self.unit**2) else: def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True): return self._wrap_function(np.var, axis, dtype, out=out, ddof=ddof, keepdims=keepdims, where=where, unit=self.unit**2) if NUMPY_LT_1_20: def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False): return self._wrap_function(np.std, axis, dtype, out=out, ddof=ddof, keepdims=keepdims) else: def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True): return self._wrap_function(np.std, axis, dtype, out=out, ddof=ddof, keepdims=keepdims, where=where) if NUMPY_LT_1_20: def mean(self, axis=None, dtype=None, out=None, keepdims=False): return self._wrap_function(np.mean, axis, dtype, out=out, keepdims=keepdims) else: def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True): return self._wrap_function(np.mean, axis, dtype, out=out, keepdims=keepdims, where=where) def round(self, decimals=0, out=None): return self._wrap_function(np.round, decimals, out=out) def dot(self, b, out=None): result_unit = self.unit * getattr(b, 'unit', dimensionless_unscaled) return self._wrap_function(np.dot, b, out=out, unit=result_unit) # Calculation: override methods that do not make sense. def all(self, axis=None, out=None): raise TypeError("cannot evaluate truth value of quantities. " "Evaluate array with q.value.all(...)") def any(self, axis=None, out=None): raise TypeError("cannot evaluate truth value of quantities. " "Evaluate array with q.value.any(...)") # Calculation: numpy functions that can be overridden with methods. def diff(self, n=1, axis=-1): return self._wrap_function(np.diff, n, axis) def ediff1d(self, to_end=None, to_begin=None): return self._wrap_function(np.ediff1d, to_end, to_begin) if NUMPY_LT_1_22: def nansum(self, axis=None, out=None, keepdims=False): return self._wrap_function(np.nansum, axis, out=out, keepdims=keepdims) else: def nansum(self, axis=None, out=None, keepdims=False, *, initial=None, where=True): return self._wrap_function(np.nansum, axis, out=out, keepdims=keepdims, initial=initial, where=where) def insert(self, obj, values, axis=None): """ Insert values along the given axis before the given indices and return a new `~astropy.units.Quantity` object. This is a thin wrapper around the `numpy.insert` function. Parameters ---------- obj : int, slice or sequence of int Object that defines the index or indices before which ``values`` is inserted. values : array-like Values to insert. If the type of ``values`` is different from that of quantity, ``values`` is converted to the matching type. 
    def insert(self, obj, values, axis=None):
        """
        Insert values along the given axis before the given indices and
        return a new `~astropy.units.Quantity` object.

        This is a thin wrapper around the `numpy.insert` function.

        Parameters
        ----------
        obj : int, slice or sequence of int
            Object that defines the index or indices before which ``values``
            is inserted.
        values : array-like
            Values to insert.  If the type of ``values`` is different from
            that of quantity, ``values`` is converted to the matching type.
            ``values`` should be shaped so that it can be broadcast
            appropriately.  The unit of ``values`` must be consistent with
            this quantity.
        axis : int, optional
            Axis along which to insert ``values``.  If ``axis`` is None then
            the quantity array is flattened before insertion.

        Returns
        -------
        out : `~astropy.units.Quantity`
            A copy of quantity with ``values`` inserted.  Note that the
            insertion does not occur in-place: a new quantity array is
            returned.

        Examples
        --------
        >>> import astropy.units as u
        >>> q = [1, 2] * u.m
        >>> q.insert(0, 50 * u.cm)
        <Quantity [ 0.5,  1.,  2.] m>

        >>> q = [[1, 2], [3, 4]] * u.m
        >>> q.insert(1, [10, 20] * u.m, axis=0)
        <Quantity [[  1.,  2.],
                   [ 10., 20.],
                   [  3.,  4.]] m>

        >>> q.insert(1, 10 * u.m, axis=1)
        <Quantity [[  1., 10.,  2.],
                   [  3., 10.,  4.]] m>
        """
        out_array = np.insert(self.value, obj, self._to_own_unit(values),
                              axis)
        return self._new_view(out_array)


class SpecificTypeQuantity(Quantity):
    """Superclass for Quantities of specific physical type.

    Subclasses of these work just like :class:`~astropy.units.Quantity`,
    except that they are for specific physical types (and may have methods
    that are only appropriate for that type).  Astropy examples are
    :class:`~astropy.coordinates.Angle` and
    :class:`~astropy.coordinates.Distance`.

    At a minimum, subclasses should set ``_equivalent_unit`` to the unit
    associated with the physical type.
    """
    # The unit for the specific physical type.  Instances can only be created
    # with units that are equivalent to this.
    _equivalent_unit = None

    # The default unit used for views.  Even with `None`, views of arrays
    # without units are possible, but will have an uninitialized unit.
    _unit = None

    # Default unit for initialization through the constructor.
    _default_unit = None

    # ensure that we get precedence over our superclass.
    __array_priority__ = Quantity.__array_priority__ + 10

    def __quantity_subclass__(self, unit):
        if unit.is_equivalent(self._equivalent_unit):
            return type(self), True
        else:
            return super().__quantity_subclass__(unit)[0], False

    def _set_unit(self, unit):
        if unit is None or not unit.is_equivalent(self._equivalent_unit):
            raise UnitTypeError(
                "{} instances require units equivalent to '{}'"
                .format(type(self).__name__, self._equivalent_unit) +
                (", but no unit was given."
                 if unit is None else f", so cannot set it to '{unit}'."))

        super()._set_unit(unit)
def isclose(a, b, rtol=1.e-5, atol=None, equal_nan=False, **kwargs):
    """
    Return a boolean array where two arrays are element-wise equal
    within a tolerance.

    Parameters
    ----------
    a, b : array-like or `~astropy.units.Quantity`
        Input values or arrays to compare
    rtol : array-like or `~astropy.units.Quantity`
        The relative tolerance for the comparison, which defaults to
        ``1e-5``.  If ``rtol`` is a :class:`~astropy.units.Quantity`,
        then it must be dimensionless.
    atol : number or `~astropy.units.Quantity`
        The absolute tolerance for the comparison.  The units (or lack
        thereof) of ``a``, ``b``, and ``atol`` must be consistent with
        each other.  If `None`, ``atol`` defaults to zero in the
        appropriate units.
    equal_nan : `bool`
        Whether to compare NaNs as equal.  If `True`, NaNs in ``a`` will
        be considered equal to NaNs in ``b``.

    Notes
    -----
    This is a :class:`~astropy.units.Quantity`-aware version of
    :func:`numpy.isclose`.  However, this differs from the `numpy` function
    in that the default for the absolute tolerance here is zero instead of
    ``atol=1e-8`` in `numpy`, as there is no natural way to set a default
    *absolute* tolerance given two inputs that may have differently scaled
    units.

    Raises
    ------
    `~astropy.units.UnitsError`
        If the dimensions of ``a``, ``b``, or ``atol`` are incompatible,
        or if ``rtol`` is not dimensionless.

    See also
    --------
    allclose
    """
    unquantified_args = _unquantify_allclose_arguments(a, b, rtol, atol)
    return np.isclose(*unquantified_args, equal_nan=equal_nan, **kwargs)


def allclose(a, b, rtol=1.e-5, atol=None, equal_nan=False, **kwargs) -> bool:
    """
    Whether two arrays are element-wise equal within a tolerance.

    Parameters
    ----------
    a, b : array-like or `~astropy.units.Quantity`
        Input values or arrays to compare
    rtol : array-like or `~astropy.units.Quantity`
        The relative tolerance for the comparison, which defaults to
        ``1e-5``.  If ``rtol`` is a :class:`~astropy.units.Quantity`,
        then it must be dimensionless.
    atol : number or `~astropy.units.Quantity`
        The absolute tolerance for the comparison.  The units (or lack
        thereof) of ``a``, ``b``, and ``atol`` must be consistent with
        each other.  If `None`, ``atol`` defaults to zero in the
        appropriate units.
    equal_nan : `bool`
        Whether to compare NaNs as equal.  If `True`, NaNs in ``a`` will
        be considered equal to NaNs in ``b``.

    Notes
    -----
    This is a :class:`~astropy.units.Quantity`-aware version of
    :func:`numpy.allclose`.  However, this differs from the `numpy` function
    in that the default for the absolute tolerance here is zero instead of
    ``atol=1e-8`` in `numpy`, as there is no natural way to set a default
    *absolute* tolerance given two inputs that may have differently scaled
    units.

    Raises
    ------
    `~astropy.units.UnitsError`
        If the dimensions of ``a``, ``b``, or ``atol`` are incompatible,
        or if ``rtol`` is not dimensionless.

    See also
    --------
    isclose
    """
    unquantified_args = _unquantify_allclose_arguments(a, b, rtol, atol)
    return np.allclose(*unquantified_args, equal_nan=equal_nan, **kwargs)


def _unquantify_allclose_arguments(actual, desired, rtol, atol):
    actual = Quantity(actual, subok=True, copy=False)

    desired = Quantity(desired, subok=True, copy=False)
    try:
        desired = desired.to(actual.unit)
    except UnitsError:
        raise UnitsError(
            f"Units for 'desired' ({desired.unit}) and 'actual' "
            f"({actual.unit}) are not convertible"
        )

    if atol is None:
        # By default, we assume an absolute tolerance of zero in the
        # appropriate units.  The default value of None for atol is
        # needed because the units of atol must be consistent with the
        # units for a and b.
        atol = Quantity(0)
    else:
        atol = Quantity(atol, subok=True, copy=False)
        try:
            atol = atol.to(actual.unit)
        except UnitsError:
            raise UnitsError(
                f"Units for 'atol' ({atol.unit}) and 'actual' "
                f"({actual.unit}) are not convertible"
            )

    rtol = Quantity(rtol, subok=True, copy=False)
    try:
        rtol = rtol.to(dimensionless_unscaled)
    except Exception:
        raise UnitsError("'rtol' should be dimensionless")

    return actual.value, desired.value, rtol.value, atol.value
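# Editor's note (added example, not part of the original module): a minimal
# usage sketch of the unit-aware comparison helpers above, assuming a
# standard astropy installation.  Values are converted to common units before
# the numpy comparison runs:
#
#     >>> import astropy.units as u
#     >>> u.allclose([1., 2.] * u.m, [100., 200.] * u.cm)
#     True
#     >>> bool(u.isclose(1. * u.m, 99. * u.cm, atol=2. * u.cm))
#     True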
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines the SI units.  They are also available in the
`astropy.units` namespace.
"""

import numpy as _numpy

from astropy.constants import si as _si

from .core import Unit, UnitBase, def_unit

_ns = globals()

###########################################################################
# DIMENSIONLESS

def_unit(['percent', 'pct'], Unit(0.01), namespace=_ns, prefixes=False,
         doc="percent: one hundredth of unity, factor 0.01",
         format={'generic': '%', 'console': '%', 'cds': '%',
                 'latex': r'\%', 'unicode': '%'})

###########################################################################
# LENGTH

def_unit(['m', 'meter'], namespace=_ns, prefixes=True,
         doc="meter: base unit of length in SI")

def_unit(['micron'], um, namespace=_ns,
         doc="micron: alias for micrometer (um)",
         format={'latex': r'\mu m', 'unicode': '\N{MICRO SIGN}m'})

def_unit(['Angstrom', 'AA', 'angstrom'], 0.1 * nm, namespace=_ns,
         doc="ångström: 10 ** -10 m",
         prefixes=[(['m', 'milli'], ['milli', 'm'], 1.e-3)],
         format={'latex': r'\mathring{A}', 'unicode': 'Å',
                 'vounit': 'Angstrom'})

###########################################################################
# VOLUMES

def_unit((['l', 'L'], ['liter']), 1000 * cm ** 3.0, namespace=_ns,
         prefixes=True,
         format={'latex': r'\mathcal{l}', 'unicode': 'ℓ'},
         doc="liter: metric unit of volume")

###########################################################################
# ANGULAR MEASUREMENTS

def_unit(['rad', 'radian'], namespace=_ns, prefixes=True,
         doc="radian: angular measurement of the ratio between the length "
             "on an arc and its radius")

def_unit(['deg', 'degree'], _numpy.pi / 180.0 * rad, namespace=_ns,
         prefixes=True,
         doc="degree: angular measurement 1/360 of full rotation",
         format={'latex': r'{}^{\circ}', 'unicode': '°'})

def_unit(['hourangle'], 15.0 * deg, namespace=_ns, prefixes=False,
         doc="hour angle: angular measurement with 24 in a full circle",
         format={'latex': r'{}^{h}', 'unicode': 'ʰ'})

def_unit(['arcmin', 'arcminute'], 1.0 / 60.0 * deg, namespace=_ns,
         prefixes=True,
         doc="arc minute: angular measurement",
         format={'latex': r'{}^{\prime}', 'unicode': '′'})

def_unit(['arcsec', 'arcsecond'], 1.0 / 3600.0 * deg, namespace=_ns,
         prefixes=True,
         doc="arc second: angular measurement")

# These special formats should only be used for the non-prefix versions
arcsec._format = {'latex': r'{}^{\prime\prime}', 'unicode': '″'}

def_unit(['mas'], 0.001 * arcsec, namespace=_ns,
         doc="milli arc second: angular measurement")

def_unit(['uas'], 0.000001 * arcsec, namespace=_ns,
         doc="micro arc second: angular measurement",
         format={'latex': r'\mu as', 'unicode': 'μas'})

def_unit(['sr', 'steradian'], rad ** 2, namespace=_ns, prefixes=True,
         doc="steradian: unit of solid angle in SI")

###########################################################################
# TIME

def_unit(['s', 'second'], namespace=_ns, prefixes=True,
         exclude_prefixes=['a'],
         doc="second: base unit of time in SI.")

def_unit(['min', 'minute'], 60 * s, prefixes=True, namespace=_ns)

def_unit(['h', 'hour', 'hr'], 3600 * s, namespace=_ns, prefixes=True,
         exclude_prefixes=['p'])

def_unit(['d', 'day'], 24 * h, namespace=_ns, prefixes=True,
         exclude_prefixes=['c', 'y'])

def_unit(['sday'], 86164.09053 * s, namespace=_ns,
         doc="Sidereal day (sday) is the time of one rotation of the Earth.")

def_unit(['wk', 'week'], 7 * day, namespace=_ns)

def_unit(['fortnight'], 2 * wk, namespace=_ns)
def_unit(['a', 'annum'], 365.25 * d, namespace=_ns, prefixes=True,
         exclude_prefixes=['P'])

def_unit(['yr', 'year'], 365.25 * d, namespace=_ns, prefixes=True)

###########################################################################
# FREQUENCY

def_unit(['Hz', 'Hertz', 'hertz'], 1 / s, namespace=_ns, prefixes=True,
         doc="Frequency")

###########################################################################
# MASS

def_unit(['kg', 'kilogram'], namespace=_ns,
         doc="kilogram: base unit of mass in SI.")

def_unit(['g', 'gram'], 1.0e-3 * kg, namespace=_ns, prefixes=True,
         exclude_prefixes=['k', 'kilo'])

def_unit(['t', 'tonne'], 1000 * kg, namespace=_ns,
         doc="Metric tonne")

###########################################################################
# AMOUNT OF SUBSTANCE

def_unit(['mol', 'mole'], namespace=_ns, prefixes=True,
         doc="mole: amount of a chemical substance in SI.")

###########################################################################
# TEMPERATURE

def_unit(
    ['K', 'Kelvin'], namespace=_ns, prefixes=True,
    doc="Kelvin: temperature with a null point at absolute zero.")

def_unit(
    ['deg_C', 'Celsius'], namespace=_ns, doc='Degrees Celsius',
    format={'latex': r'{}^{\circ}C', 'unicode': '°C'})

###########################################################################
# FORCE

def_unit(['N', 'Newton', 'newton'], kg * m * s ** -2, namespace=_ns,
         prefixes=True, doc="Newton: force")

##########################################################################
# ENERGY

def_unit(['J', 'Joule', 'joule'], N * m, namespace=_ns, prefixes=True,
         doc="Joule: energy")

def_unit(['eV', 'electronvolt'], _si.e.value * J, namespace=_ns,
         prefixes=True,
         doc="Electron Volt")

##########################################################################
# PRESSURE

def_unit(['Pa', 'Pascal', 'pascal'], J * m ** -3, namespace=_ns,
         prefixes=True,
         doc="Pascal: pressure")

###########################################################################
# POWER

def_unit(['W', 'Watt', 'watt'], J / s, namespace=_ns, prefixes=True,
         doc="Watt: power")

###########################################################################
# ELECTRICAL

def_unit(['A', 'ampere', 'amp'], namespace=_ns, prefixes=True,
         doc="ampere: base unit of electric current in SI")

def_unit(['C', 'coulomb'], A * s, namespace=_ns, prefixes=True,
         doc="coulomb: electric charge")

def_unit(['V', 'Volt', 'volt'], J * C ** -1, namespace=_ns, prefixes=True,
         doc="Volt: electric potential or electromotive force")

def_unit((['Ohm', 'ohm'], ['Ohm']), V * A ** -1, namespace=_ns,
         prefixes=True,
         doc="Ohm: electrical resistance",
         format={'latex': r'\Omega', 'unicode': 'Ω'})

def_unit(['S', 'Siemens', 'siemens'], A * V ** -1, namespace=_ns,
         prefixes=True,
         doc="Siemens: electrical conductance")

def_unit(['F', 'Farad', 'farad'], C * V ** -1, namespace=_ns, prefixes=True,
         doc="Farad: electrical capacitance")

###########################################################################
# MAGNETIC

def_unit(['Wb', 'Weber', 'weber'], V * s, namespace=_ns, prefixes=True,
         doc="Weber: magnetic flux")

def_unit(['T', 'Tesla', 'tesla'], Wb * m ** -2, namespace=_ns,
         prefixes=True,
         doc="Tesla: magnetic flux density")

def_unit(['H', 'Henry', 'henry'], Wb * A ** -1, namespace=_ns,
         prefixes=True,
         doc="Henry: inductance")

###########################################################################
# ILLUMINATION

def_unit(['cd', 'candela'], namespace=_ns, prefixes=True,
         doc="candela: base unit of luminous intensity in SI")

def_unit(['lm', 'lumen'], cd * sr, namespace=_ns, prefixes=True,
         doc="lumen: luminous flux")
prefixes=True, doc="lux: luminous emittance") ########################################################################### # RADIOACTIVITY def_unit(['Bq', 'becquerel'], 1 / s, namespace=_ns, prefixes=False, doc="becquerel: unit of radioactivity") def_unit(['Ci', 'curie'], Bq * 3.7e10, namespace=_ns, prefixes=False, doc="curie: unit of radioactivity") ########################################################################### # BASES bases = set([m, s, kg, A, cd, rad, K, mol]) ########################################################################### # CLEANUP del UnitBase del Unit del def_unit ########################################################################### # DOCSTRING # This generates a docstring for this module that describes all of the # standard units defined here. from .utils import generate_unit_summary as _generate_unit_summary if __doc__ is not None: __doc__ += _generate_unit_summary(globals())
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines miscellaneous units. They are also
available in the `astropy.units` namespace.
"""

from astropy.constants import si as _si

from . import si
from .core import (UnitBase, binary_prefixes, def_unit, set_enabled_units,
                   si_prefixes)

# To ensure si units of the constants can be interpreted.
set_enabled_units([si])

import numpy as _numpy

_ns = globals()

###########################################################################
# AREAS

def_unit(['barn', 'barn'], 10 ** -28 * si.m ** 2, namespace=_ns,
         prefixes=True,
         doc="barn: unit of area used in HEP")

###########################################################################
# ANGULAR MEASUREMENTS

def_unit(['cycle', 'cy'], 2.0 * _numpy.pi * si.rad,
         namespace=_ns, prefixes=False,
         doc="cycle: angular measurement, a full turn or rotation")

def_unit(['spat', 'sp'], 4.0 * _numpy.pi * si.sr,
         namespace=_ns, prefixes=False,
         doc="spat: the solid angle of the sphere, 4pi sr")

##########################################################################
# PRESSURE

def_unit(['bar'], 1e5 * si.Pa, namespace=_ns,
         prefixes=[(['m'], ['milli'], 1.e-3)],
         doc="bar: pressure")

# The torr is almost the same as mmHg but not quite.
# See https://en.wikipedia.org/wiki/Torr
# Define the unit here despite it not being an astrophysical unit.
# It may be moved if more similar units are created later.
def_unit(['Torr', 'torr'], _si.atm.value/760. * si.Pa, namespace=_ns,
         prefixes=[(['m'], ['milli'], 1.e-3)],
         doc="Unit of pressure based on an absolute scale, now defined as "
             "exactly 1/760 of a standard atmosphere")

###########################################################################
# MASS

def_unit(['M_p'], _si.m_p, namespace=_ns, doc="Proton mass",
         format={'latex': r'M_{p}', 'unicode': 'Mₚ'})

def_unit(['M_e'], _si.m_e, namespace=_ns, doc="Electron mass",
         format={'latex': r'M_{e}', 'unicode': 'Mₑ'})

# Unified atomic mass unit
def_unit(['u', 'Da', 'Dalton'], _si.u, namespace=_ns,
         prefixes=True, exclude_prefixes=['a', 'da'],
         doc="Unified atomic mass unit")

###########################################################################
# COMPUTER

def_unit((['bit', 'b'], ['bit']), namespace=_ns,
         prefixes=si_prefixes + binary_prefixes)

def_unit((['byte', 'B'], ['byte']), 8 * bit, namespace=_ns,
         format={'vounit': 'byte'},
         prefixes=si_prefixes + binary_prefixes,
         exclude_prefixes=['d'])

def_unit((['pix', 'pixel'], ['pixel']),
         format={'ogip': 'pixel', 'vounit': 'pixel'},
         namespace=_ns, prefixes=True)

def_unit((['vox', 'voxel'], ['voxel']),
         format={'fits': 'voxel', 'ogip': 'voxel', 'vounit': 'voxel'},
         namespace=_ns, prefixes=True)

###########################################################################
# CLEANUP

del UnitBase
del def_unit
del si

###########################################################################
# DOCSTRING

# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
    __doc__ += _generate_unit_summary(globals())
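# Editor's note (added example, not part of the original module): a quick
# sketch of the SI and binary prefixes enabled for the computing units above,
# assuming a standard astropy installation:
#
#     >>> import astropy.units as u
#     >>> (1. * u.Gibyte).to(u.byte)  # doctest: +FLOAT_CMP
#     <Quantity 1.07374182e+09 byte>
#     >>> (1. * u.Gbyte).to(u.byte)  # doctest: +FLOAT_CMP
#     <Quantity 1.e+09 byte>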
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""A set of standard astronomical equivalencies."""

import warnings
from collections import UserList

# THIRD-PARTY
import numpy as np

# LOCAL
from astropy.constants import si as _si
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.utils.misc import isiterable

from . import astrophys, cgs, dimensionless_unscaled, misc, si
from .core import Unit, UnitsError
from .function import units as function_units

__all__ = ['parallax', 'spectral', 'spectral_density', 'doppler_radio',
           'doppler_optical', 'doppler_relativistic', 'doppler_redshift',
           'mass_energy', 'brightness_temperature',
           'thermodynamic_temperature', 'beam_angular_area',
           'dimensionless_angles', 'logarithmic', 'temperature',
           'temperature_energy', 'molar_mass_amu', 'pixel_scale',
           'plate_scale', "Equivalency"]


class Equivalency(UserList):
    """
    A container for a units equivalency.

    Attributes
    ----------
    name : `str`
        The name of the equivalency.
    kwargs : `dict`
        Any positional or keyword arguments used to make the equivalency.
    """

    def __init__(self, equiv_list, name='', kwargs=None):
        self.data = equiv_list
        self.name = [name]
        self.kwargs = [kwargs] if kwargs is not None else [dict()]

    def __add__(self, other):
        if isinstance(other, Equivalency):
            new = super().__add__(other)
            new.name = self.name[:] + other.name
            new.kwargs = self.kwargs[:] + other.kwargs
            return new
        else:
            return self.data.__add__(other)

    def __eq__(self, other):
        return (isinstance(other, self.__class__) and
                self.name == other.name and
                self.kwargs == other.kwargs)


def dimensionless_angles():
    """Allow angles to be equivalent to dimensionless (with 1 rad = 1 m/m = 1).

    It is special compared to other equivalency pairs in that it
    allows this independent of the power to which the angle is raised,
    and independent of whether it is part of a more complicated unit.
    """
    return Equivalency([(si.radian, None)], "dimensionless_angles")


def logarithmic():
    """Allow logarithmic units to be converted to dimensionless fractions."""
    return Equivalency([
        (dimensionless_unscaled, function_units.dex,
         np.log10, lambda x: 10.**x)
    ], "logarithmic")


def parallax():
    """
    Returns a list of equivalence pairs that handle the conversion
    between parallax angle and distance.
    """

    def parallax_converter(x):
        x = np.asanyarray(x)
        d = 1 / x

        if isiterable(d):
            d[d < 0] = np.nan
            return d

        else:
            if d < 0:
                return np.array(np.nan)
            else:
                return d

    return Equivalency([
        (si.arcsecond, astrophys.parsec, parallax_converter)
    ], "parallax")
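# Editor's note (added example, not part of the original module): a minimal
# usage sketch of the parallax equivalency above, assuming a standard astropy
# installation.  A 10 mas parallax corresponds to a distance of 100 pc:
#
#     >>> import astropy.units as u
#     >>> (10. * u.mas).to(u.pc, equivalencies=u.parallax())  # doctest: +FLOAT_CMP
#     <Quantity 100. pc>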
def spectral():
    """
    Returns a list of equivalence pairs that handle spectral
    wavelength, wave number, frequency, and energy equivalencies.

    Allows conversions between wavelength units, wave number units,
    frequency units, and energy units as they relate to light.

    There are two types of wave number:

        * spectroscopic - :math:`1 / \\lambda` (per meter)
        * angular - :math:`2 \\pi / \\lambda` (radian per meter)

    """
    hc = _si.h.value * _si.c.value
    two_pi = 2.0 * np.pi
    inv_m_spec = si.m ** -1
    inv_m_ang = si.radian / si.m

    return Equivalency([
        (si.m, si.Hz, lambda x: _si.c.value / x),
        (si.m, si.J, lambda x: hc / x),
        (si.Hz, si.J, lambda x: _si.h.value * x, lambda x: x / _si.h.value),
        (si.m, inv_m_spec, lambda x: 1.0 / x),
        (si.Hz, inv_m_spec, lambda x: x / _si.c.value,
         lambda x: _si.c.value * x),
        (si.J, inv_m_spec, lambda x: x / hc, lambda x: hc * x),
        (inv_m_spec, inv_m_ang, lambda x: x * two_pi, lambda x: x / two_pi),
        (si.m, inv_m_ang, lambda x: two_pi / x),
        (si.Hz, inv_m_ang, lambda x: two_pi * x / _si.c.value,
         lambda x: _si.c.value * x / two_pi),
        (si.J, inv_m_ang, lambda x: x * two_pi / hc,
         lambda x: hc * x / two_pi)
    ], "spectral")
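# Editor's note (added example, not part of the original module): a short
# sketch of the spectral equivalency above, assuming a standard astropy
# installation.  Wavelength, frequency, and energy interconvert freely:
#
#     >>> import astropy.units as u
#     >>> (500. * u.nm).to(u.THz, equivalencies=u.spectral())  # doctest: +FLOAT_CMP
#     <Quantity 599.584916 THz>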
def spectral_density(wav, factor=None):
    """
    Returns a list of equivalence pairs that handle spectral density
    with regard to wavelength and frequency.

    Parameters
    ----------
    wav : `~astropy.units.Quantity`
        `~astropy.units.Quantity` associated with values being converted
        (e.g., wavelength or frequency).

    Notes
    -----
    The ``factor`` argument is left for backward-compatibility with the
    syntax ``spectral_density(unit, factor)`` but users are encouraged to use
    ``spectral_density(factor * unit)`` instead.
    """
    from .core import UnitBase

    if isinstance(wav, UnitBase):
        if factor is None:
            raise ValueError(
                'If `wav` is specified as a unit, `factor` should be set')
        wav = factor * wav   # Convert to Quantity

    c_Aps = _si.c.to_value(si.AA / si.s)  # Angstrom/s
    h_cgs = _si.h.cgs.value  # erg * s
    hc = c_Aps * h_cgs

    # flux density
    f_la = cgs.erg / si.angstrom / si.cm ** 2 / si.s
    f_nu = cgs.erg / si.Hz / si.cm ** 2 / si.s
    nu_f_nu = cgs.erg / si.cm ** 2 / si.s
    la_f_la = nu_f_nu
    phot_f_la = astrophys.photon / (si.cm ** 2 * si.s * si.AA)
    phot_f_nu = astrophys.photon / (si.cm ** 2 * si.s * si.Hz)
    la_phot_f_la = astrophys.photon / (si.cm ** 2 * si.s)

    # luminosity density
    L_nu = cgs.erg / si.s / si.Hz
    L_la = cgs.erg / si.s / si.angstrom
    nu_L_nu = cgs.erg / si.s
    la_L_la = nu_L_nu
    phot_L_la = astrophys.photon / (si.s * si.AA)
    phot_L_nu = astrophys.photon / (si.s * si.Hz)

    # surface brightness (flux equiv)
    S_la = cgs.erg / si.angstrom / si.cm ** 2 / si.s / si.sr
    S_nu = cgs.erg / si.Hz / si.cm ** 2 / si.s / si.sr
    nu_S_nu = cgs.erg / si.cm ** 2 / si.s / si.sr
    la_S_la = nu_S_nu
    phot_S_la = astrophys.photon / (si.cm ** 2 * si.s * si.AA * si.sr)
    phot_S_nu = astrophys.photon / (si.cm ** 2 * si.s * si.Hz * si.sr)

    # surface brightness (luminosity equiv)
    SL_nu = cgs.erg / si.s / si.Hz / si.sr
    SL_la = cgs.erg / si.s / si.angstrom / si.sr
    nu_SL_nu = cgs.erg / si.s / si.sr
    la_SL_la = nu_SL_nu
    phot_SL_la = astrophys.photon / (si.s * si.AA * si.sr)
    phot_SL_nu = astrophys.photon / (si.s * si.Hz * si.sr)

    def converter(x):
        return x * (wav.to_value(si.AA, spectral()) ** 2 / c_Aps)

    def iconverter(x):
        return x / (wav.to_value(si.AA, spectral()) ** 2 / c_Aps)

    def converter_f_nu_to_nu_f_nu(x):
        return x * wav.to_value(si.Hz, spectral())

    def iconverter_f_nu_to_nu_f_nu(x):
        return x / wav.to_value(si.Hz, spectral())

    def converter_f_la_to_la_f_la(x):
        return x * wav.to_value(si.AA, spectral())

    def iconverter_f_la_to_la_f_la(x):
        return x / wav.to_value(si.AA, spectral())

    def converter_phot_f_la_to_f_la(x):
        return hc * x / wav.to_value(si.AA, spectral())

    def iconverter_phot_f_la_to_f_la(x):
        return x * wav.to_value(si.AA, spectral()) / hc

    def converter_phot_f_la_to_f_nu(x):
        return h_cgs * x * wav.to_value(si.AA, spectral())

    def iconverter_phot_f_la_to_f_nu(x):
        return x / (wav.to_value(si.AA, spectral()) * h_cgs)

    def converter_phot_f_la_phot_f_nu(x):
        return x * wav.to_value(si.AA, spectral()) ** 2 / c_Aps

    def iconverter_phot_f_la_phot_f_nu(x):
        return c_Aps * x / wav.to_value(si.AA, spectral()) ** 2

    converter_phot_f_nu_to_f_nu = converter_phot_f_la_to_f_la
    iconverter_phot_f_nu_to_f_nu = iconverter_phot_f_la_to_f_la

    def converter_phot_f_nu_to_f_la(x):
        return x * hc * c_Aps / wav.to_value(si.AA, spectral()) ** 3

    def iconverter_phot_f_nu_to_f_la(x):
        return x * wav.to_value(si.AA, spectral()) ** 3 / (hc * c_Aps)

    # for luminosity density
    converter_L_nu_to_nu_L_nu = converter_f_nu_to_nu_f_nu
    iconverter_L_nu_to_nu_L_nu = iconverter_f_nu_to_nu_f_nu
    converter_L_la_to_la_L_la = converter_f_la_to_la_f_la
    iconverter_L_la_to_la_L_la = iconverter_f_la_to_la_f_la

    converter_phot_L_la_to_L_la = converter_phot_f_la_to_f_la
    iconverter_phot_L_la_to_L_la = iconverter_phot_f_la_to_f_la
    converter_phot_L_la_to_L_nu = converter_phot_f_la_to_f_nu
    iconverter_phot_L_la_to_L_nu = iconverter_phot_f_la_to_f_nu
    converter_phot_L_la_phot_L_nu = converter_phot_f_la_phot_f_nu
    iconverter_phot_L_la_phot_L_nu = iconverter_phot_f_la_phot_f_nu
    converter_phot_L_nu_to_L_nu = converter_phot_f_nu_to_f_nu
    iconverter_phot_L_nu_to_L_nu = iconverter_phot_f_nu_to_f_nu
    converter_phot_L_nu_to_L_la = converter_phot_f_nu_to_f_la
    iconverter_phot_L_nu_to_L_la = iconverter_phot_f_nu_to_f_la

    return Equivalency([
        # flux
        (f_la, f_nu, converter, iconverter),
        (f_nu, nu_f_nu, converter_f_nu_to_nu_f_nu,
         iconverter_f_nu_to_nu_f_nu),
        (f_la, la_f_la, converter_f_la_to_la_f_la,
         iconverter_f_la_to_la_f_la),
        (phot_f_la, f_la, converter_phot_f_la_to_f_la,
         iconverter_phot_f_la_to_f_la),
        (phot_f_la, f_nu, converter_phot_f_la_to_f_nu,
         iconverter_phot_f_la_to_f_nu),
        (phot_f_la, phot_f_nu, converter_phot_f_la_phot_f_nu,
         iconverter_phot_f_la_phot_f_nu),
        (phot_f_nu, f_nu, converter_phot_f_nu_to_f_nu,
         iconverter_phot_f_nu_to_f_nu),
        (phot_f_nu, f_la, converter_phot_f_nu_to_f_la,
         iconverter_phot_f_nu_to_f_la),
        # integrated flux
        (la_phot_f_la, la_f_la, converter_phot_f_la_to_f_la,
         iconverter_phot_f_la_to_f_la),
        # luminosity
        (L_la, L_nu, converter, iconverter),
        (L_nu, nu_L_nu, converter_L_nu_to_nu_L_nu,
         iconverter_L_nu_to_nu_L_nu),
        (L_la, la_L_la, converter_L_la_to_la_L_la,
         iconverter_L_la_to_la_L_la),
        (phot_L_la, L_la, converter_phot_L_la_to_L_la,
         iconverter_phot_L_la_to_L_la),
        (phot_L_la, L_nu, converter_phot_L_la_to_L_nu,
         iconverter_phot_L_la_to_L_nu),
        (phot_L_la, phot_L_nu, converter_phot_L_la_phot_L_nu,
         iconverter_phot_L_la_phot_L_nu),
        (phot_L_nu, L_nu, converter_phot_L_nu_to_L_nu,
         iconverter_phot_L_nu_to_L_nu),
        (phot_L_nu, L_la, converter_phot_L_nu_to_L_la,
         iconverter_phot_L_nu_to_L_la),
        # surface brightness (flux equiv)
        (S_la, S_nu, converter, iconverter),
        (S_nu, nu_S_nu, converter_f_nu_to_nu_f_nu,
         iconverter_f_nu_to_nu_f_nu),
        (S_la, la_S_la, converter_f_la_to_la_f_la,
         iconverter_f_la_to_la_f_la),
        (phot_S_la, S_la, converter_phot_f_la_to_f_la,
         iconverter_phot_f_la_to_f_la),
        (phot_S_la, S_nu, converter_phot_f_la_to_f_nu,
         iconverter_phot_f_la_to_f_nu),
        (phot_S_la, phot_S_nu, converter_phot_f_la_phot_f_nu,
         iconverter_phot_f_la_phot_f_nu),
        (phot_S_nu, S_nu, converter_phot_f_nu_to_f_nu,
         iconverter_phot_f_nu_to_f_nu),
        (phot_S_nu, S_la, converter_phot_f_nu_to_f_la,
         iconverter_phot_f_nu_to_f_la),
        # surface brightness (luminosity equiv)
        (SL_la, SL_nu, converter, iconverter),
        (SL_nu, nu_SL_nu, converter_L_nu_to_nu_L_nu,
         iconverter_L_nu_to_nu_L_nu),
        (SL_la, la_SL_la, converter_L_la_to_la_L_la,
         iconverter_L_la_to_la_L_la),
        (phot_SL_la, SL_la, converter_phot_L_la_to_L_la,
         iconverter_phot_L_la_to_L_la),
        (phot_SL_la, SL_nu, converter_phot_L_la_to_L_nu,
         iconverter_phot_L_la_to_L_nu),
        (phot_SL_la, phot_SL_nu, converter_phot_L_la_phot_L_nu,
         iconverter_phot_L_la_phot_L_nu),
        (phot_SL_nu, SL_nu, converter_phot_L_nu_to_L_nu,
         iconverter_phot_L_nu_to_L_nu),
        (phot_SL_nu, SL_la, converter_phot_L_nu_to_L_la,
         iconverter_phot_L_nu_to_L_la),
    ], "spectral_density", {'wav': wav, 'factor': factor})
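# Editor's note (added example, not part of the original module): a minimal
# sketch of the spectral flux-density equivalency above, assuming a standard
# astropy installation.  The conversion pivots on the reference wavelength
# (here a hypothetical 5500 Angstrom), using f_lambda = f_nu * c / lambda**2:
#
#     >>> import astropy.units as u
#     >>> (1. * u.Jy).to(u.erg / u.cm**2 / u.s / u.AA,
#     ...                equivalencies=u.spectral_density(5500. * u.AA))  # doctest: +FLOAT_CMP
#     <Quantity 9.9104945e-13 erg / (Angstrom s cm2)>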
def doppler_radio(rest):
    r"""
    Return the equivalency pairs for the radio convention for velocity.

    The radio convention for the relation between velocity and frequency is:

    :math:`V = c \frac{f_0 - f}{f_0}  ;  f(V) = f_0 ( 1 - V/c )`

    Parameters
    ----------
    rest : `~astropy.units.Quantity`
        Any quantity supported by the standard spectral equivalencies
        (wavelength, energy, frequency, wave number).

    References
    ----------
    `NRAO site defining the conventions
    <https://www.gb.nrao.edu/~fghigo/gbtdoc/doppler.html>`_

    Examples
    --------
    >>> import astropy.units as u
    >>> CO_restfreq = 115.27120*u.GHz  # rest frequency of 12 CO 1-0 in GHz
    >>> radio_CO_equiv = u.doppler_radio(CO_restfreq)
    >>> measured_freq = 115.2832*u.GHz
    >>> radio_velocity = measured_freq.to(u.km/u.s, equivalencies=radio_CO_equiv)
    >>> radio_velocity  # doctest: +FLOAT_CMP
    <Quantity -31.209092088877583 km / s>
    """
    assert_is_spectral_unit(rest)

    ckms = _si.c.to_value('km/s')

    def to_vel_freq(x):
        restfreq = rest.to_value(si.Hz, equivalencies=spectral())
        return (restfreq-x) / (restfreq) * ckms

    def from_vel_freq(x):
        restfreq = rest.to_value(si.Hz, equivalencies=spectral())
        voverc = x/ckms
        return restfreq * (1-voverc)

    def to_vel_wav(x):
        restwav = rest.to_value(si.AA, spectral())
        return (x-restwav) / (x) * ckms

    def from_vel_wav(x):
        restwav = rest.to_value(si.AA, spectral())
        return restwav * ckms / (ckms-x)

    def to_vel_en(x):
        resten = rest.to_value(si.eV, equivalencies=spectral())
        return (resten-x) / (resten) * ckms

    def from_vel_en(x):
        resten = rest.to_value(si.eV, equivalencies=spectral())
        voverc = x/ckms
        return resten * (1-voverc)

    return Equivalency([(si.Hz, si.km/si.s, to_vel_freq, from_vel_freq),
                        (si.AA, si.km/si.s, to_vel_wav, from_vel_wav),
                        (si.eV, si.km/si.s, to_vel_en, from_vel_en),
                        ], "doppler_radio", {'rest': rest})
def doppler_optical(rest):
    r"""
    Return the equivalency pairs for the optical convention for velocity.

    The optical convention for the relation between velocity and frequency is:

    :math:`V = c \frac{f_0 - f}{f  }  ;  f(V) = f_0 ( 1 + V/c )^{-1}`

    Parameters
    ----------
    rest : `~astropy.units.Quantity`
        Any quantity supported by the standard spectral equivalencies
        (wavelength, energy, frequency, wave number).

    References
    ----------
    `NRAO site defining the conventions
    <https://www.gb.nrao.edu/~fghigo/gbtdoc/doppler.html>`_

    Examples
    --------
    >>> import astropy.units as u
    >>> CO_restfreq = 115.27120*u.GHz  # rest frequency of 12 CO 1-0 in GHz
    >>> optical_CO_equiv = u.doppler_optical(CO_restfreq)
    >>> measured_freq = 115.2832*u.GHz
    >>> optical_velocity = measured_freq.to(u.km/u.s, equivalencies=optical_CO_equiv)
    >>> optical_velocity  # doctest: +FLOAT_CMP
    <Quantity -31.20584348799674 km / s>
    """
    assert_is_spectral_unit(rest)

    ckms = _si.c.to_value('km/s')

    def to_vel_freq(x):
        restfreq = rest.to_value(si.Hz, equivalencies=spectral())
        return ckms * (restfreq-x) / x

    def from_vel_freq(x):
        restfreq = rest.to_value(si.Hz, equivalencies=spectral())
        voverc = x/ckms
        return restfreq / (1+voverc)

    def to_vel_wav(x):
        restwav = rest.to_value(si.AA, spectral())
        return ckms * (x/restwav-1)

    def from_vel_wav(x):
        restwav = rest.to_value(si.AA, spectral())
        voverc = x/ckms
        return restwav * (1+voverc)

    def to_vel_en(x):
        resten = rest.to_value(si.eV, equivalencies=spectral())
        return ckms * (resten-x) / x

    def from_vel_en(x):
        resten = rest.to_value(si.eV, equivalencies=spectral())
        voverc = x/ckms
        return resten / (1+voverc)

    return Equivalency([(si.Hz, si.km/si.s, to_vel_freq, from_vel_freq),
                        (si.AA, si.km/si.s, to_vel_wav, from_vel_wav),
                        (si.eV, si.km/si.s, to_vel_en, from_vel_en),
                        ], "doppler_optical", {'rest': rest})
def doppler_relativistic(rest):
    r"""
    Return the equivalency pairs for the relativistic convention for velocity.

    The full relativistic convention for the relation between velocity and
    frequency is:

    :math:`V = c \frac{f_0^2 - f^2}{f_0^2 + f^2} ;
    f(V) = f_0 \frac{\left(1 - (V/c)^2\right)^{1/2}}{(1+V/c)}`

    Parameters
    ----------
    rest : `~astropy.units.Quantity`
        Any quantity supported by the standard spectral equivalencies
        (wavelength, energy, frequency, wave number).

    References
    ----------
    `NRAO site defining the conventions
    <https://www.gb.nrao.edu/~fghigo/gbtdoc/doppler.html>`_

    Examples
    --------
    >>> import astropy.units as u
    >>> CO_restfreq = 115.27120*u.GHz  # rest frequency of 12 CO 1-0 in GHz
    >>> relativistic_CO_equiv = u.doppler_relativistic(CO_restfreq)
    >>> measured_freq = 115.2832*u.GHz
    >>> relativistic_velocity = measured_freq.to(u.km/u.s, equivalencies=relativistic_CO_equiv)
    >>> relativistic_velocity  # doctest: +FLOAT_CMP
    <Quantity -31.207467619351537 km / s>
    >>> measured_velocity = 1250 * u.km/u.s
    >>> relativistic_frequency = measured_velocity.to(u.GHz, equivalencies=relativistic_CO_equiv)
    >>> relativistic_frequency  # doctest: +FLOAT_CMP
    <Quantity 114.79156866993588 GHz>
    >>> relativistic_wavelength = measured_velocity.to(u.mm, equivalencies=relativistic_CO_equiv)
    >>> relativistic_wavelength  # doctest: +FLOAT_CMP
    <Quantity 2.6116243681798923 mm>
    """  # noqa: E501
    assert_is_spectral_unit(rest)

    ckms = _si.c.to_value('km/s')

    def to_vel_freq(x):
        restfreq = rest.to_value(si.Hz, equivalencies=spectral())
        return (restfreq**2-x**2) / (restfreq**2+x**2) * ckms

    def from_vel_freq(x):
        restfreq = rest.to_value(si.Hz, equivalencies=spectral())
        voverc = x/ckms
        return restfreq * ((1-voverc) / (1+(voverc)))**0.5

    def to_vel_wav(x):
        restwav = rest.to_value(si.AA, spectral())
        return (x**2-restwav**2) / (restwav**2+x**2) * ckms

    def from_vel_wav(x):
        restwav = rest.to_value(si.AA, spectral())
        voverc = x/ckms
        return restwav * ((1+voverc) / (1-voverc))**0.5

    def to_vel_en(x):
        resten = rest.to_value(si.eV, spectral())
        return (resten**2-x**2) / (resten**2+x**2) * ckms

    def from_vel_en(x):
        resten = rest.to_value(si.eV, spectral())
        voverc = x/ckms
        return resten * ((1-voverc) / (1+(voverc)))**0.5

    return Equivalency([(si.Hz, si.km/si.s, to_vel_freq, from_vel_freq),
                        (si.AA, si.km/si.s, to_vel_wav, from_vel_wav),
                        (si.eV, si.km/si.s, to_vel_en, from_vel_en),
                        ], "doppler_relativistic", {'rest': rest})


def doppler_redshift():
    """
    Returns the equivalence between Doppler redshift (unitless) and radial
    velocity.

    .. note::

        This equivalency is not compatible with cosmological
        redshift in `astropy.cosmology.units`.

    """
    rv_unit = si.km / si.s
    C_KMS = _si.c.to_value(rv_unit)

    def convert_z_to_rv(z):
        zponesq = (1 + z) ** 2
        return C_KMS * (zponesq - 1) / (zponesq + 1)

    def convert_rv_to_z(rv):
        beta = rv / C_KMS
        return np.sqrt((1 + beta) / (1 - beta)) - 1

    return Equivalency([(dimensionless_unscaled, rv_unit,
                         convert_z_to_rv, convert_rv_to_z)],
                       "doppler_redshift")


def molar_mass_amu():
    """
    Returns the equivalence between amu and molar mass.
    """
    return Equivalency([
        (si.g/si.mol, misc.u)
    ], "molar_mass_amu")


def mass_energy():
    """
    Returns a list of equivalence pairs that handle the conversion
    between mass and energy.
    """

    return Equivalency([(si.kg, si.J, lambda x: x * _si.c.value ** 2,
                         lambda x: x / _si.c.value ** 2),
                        (si.kg / si.m ** 2, si.J / si.m ** 2,
                         lambda x: x * _si.c.value ** 2,
                         lambda x: x / _si.c.value ** 2),
                        (si.kg / si.m ** 3, si.J / si.m ** 3,
                         lambda x: x * _si.c.value ** 2,
                         lambda x: x / _si.c.value ** 2),
                        (si.kg / si.s, si.J / si.s,
                         lambda x: x * _si.c.value ** 2,
                         lambda x: x / _si.c.value ** 2),
                        ], "mass_energy")
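# Editor's note (added example, not part of the original module): a quick
# numerical check of the mass-energy equivalency above (E = m c**2), assuming
# a standard astropy installation:
#
#     >>> import astropy.units as u
#     >>> (1. * u.kg).to(u.J, equivalencies=u.mass_energy())  # doctest: +FLOAT_CMP
#     <Quantity 8.98755179e+16 J>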
def brightness_temperature(frequency, beam_area=None):
    r"""
    Defines the conversion between Jy/sr and "brightness temperature",
    :math:`T_B`, in Kelvins.  The brightness temperature is a unit very
    commonly used in radio astronomy.  See, e.g., "Tools of Radio Astronomy"
    (Wilson 2009) eqn 8.16 and eqn 8.19 (these pages are available on
    `google books
    <https://books.google.com/books?id=9KHw6R8rQEMC&pg=PA179&source=gbs_toc_r&cad=4#v=onepage&q&f=false>`__).

    :math:`T_B \equiv S_\nu / \left(2 k \nu^2 / c^2 \right)`

    If the input is in Jy/beam or Jy (assuming it came from a single beam),
    the beam area is essential for this computation: the brightness
    temperature is inversely proportional to the beam area.

    Parameters
    ----------
    frequency : `~astropy.units.Quantity`
        The observed ``spectral`` equivalent `~astropy.units.Unit` (e.g.,
        frequency or wavelength).  The variable is named 'frequency' because
        it is more commonly used in radio astronomy.
        BACKWARD COMPATIBILITY NOTE: previous versions of the brightness
        temperature equivalency used the keyword ``disp``, which is no longer
        supported.
    beam_area : `~astropy.units.Quantity` ['solid angle']
        Beam area in angular units, i.e. steradian equivalent

    Examples
    --------
    Arecibo C-band beam::

        >>> import numpy as np
        >>> from astropy import units as u
        >>> beam_sigma = 50*u.arcsec
        >>> beam_area = 2*np.pi*(beam_sigma)**2
        >>> freq = 5*u.GHz
        >>> equiv = u.brightness_temperature(freq)
        >>> (1*u.Jy/beam_area).to(u.K, equivalencies=equiv)  # doctest: +FLOAT_CMP
        <Quantity 3.526295144567176 K>

    VLA synthetic beam::

        >>> bmaj = 15*u.arcsec
        >>> bmin = 15*u.arcsec
        >>> fwhm_to_sigma = 1./(8*np.log(2))**0.5
        >>> beam_area = 2.*np.pi*(bmaj*bmin*fwhm_to_sigma**2)
        >>> freq = 5*u.GHz
        >>> equiv = u.brightness_temperature(freq)
        >>> (u.Jy/beam_area).to(u.K, equivalencies=equiv)  # doctest: +FLOAT_CMP
        <Quantity 217.2658703625732 K>

    Any generic surface brightness:

        >>> surf_brightness = 1e6*u.MJy/u.sr
        >>> surf_brightness.to(u.K, equivalencies=u.brightness_temperature(500*u.GHz))  # doctest: +FLOAT_CMP
        <Quantity 130.1931904778803 K>
    """  # noqa: E501
    if frequency.unit.is_equivalent(si.sr):
        if not beam_area.unit.is_equivalent(si.Hz):
            raise ValueError("The inputs to `brightness_temperature` are "
                             "frequency and angular area.")
        warnings.warn("The inputs to `brightness_temperature` have changed. "
                      "Frequency is now the first input, and angular area "
                      "is the second, optional input.",
                      AstropyDeprecationWarning)
        frequency, beam_area = beam_area, frequency

    nu = frequency.to(si.GHz, spectral())
    if beam_area is not None:
        beam = beam_area.to_value(si.sr)

        def convert_Jy_to_K(x_jybm):
            factor = (2 * _si.k_B * si.K * nu**2 / _si.c**2).to(astrophys.Jy).value
            return (x_jybm / beam / factor)

        def convert_K_to_Jy(x_K):
            factor = (astrophys.Jy / (2 * _si.k_B * nu**2 / _si.c**2)).to(si.K).value
            return (x_K * beam / factor)

        return Equivalency([(astrophys.Jy, si.K,
                             convert_Jy_to_K, convert_K_to_Jy),
                            (astrophys.Jy/astrophys.beam, si.K,
                             convert_Jy_to_K, convert_K_to_Jy)],
                           "brightness_temperature",
                           {'frequency': frequency, 'beam_area': beam_area})
    else:
        def convert_JySr_to_K(x_jysr):
            factor = (2 * _si.k_B * si.K * nu**2 / _si.c**2).to(astrophys.Jy).value
            return (x_jysr / factor)

        def convert_K_to_JySr(x_K):
            factor = (astrophys.Jy / (2 * _si.k_B * nu**2 / _si.c**2)).to(si.K).value
            return (x_K / factor)  # multiplied by 1x for 1 steradian

        return Equivalency([(astrophys.Jy/si.sr, si.K,
                             convert_JySr_to_K, convert_K_to_JySr)],
                           "brightness_temperature",
                           {'frequency': frequency, 'beam_area': beam_area})
def beam_angular_area(beam_area):
    """
    Convert between the ``beam`` unit, which is commonly used to express the
    area of a radio telescope resolution element, and an area on the sky.
    This equivalency also supports direct conversion between ``Jy/beam`` and
    ``Jy/steradian`` units, since that is a common operation.

    Parameters
    ----------
    beam_area : unit-like
        The area of the beam in angular area units (e.g., steradians).
        Must have angular area equivalent units.
    """
    return Equivalency([(astrophys.beam, Unit(beam_area)),
                        (astrophys.beam**-1, Unit(beam_area)**-1),
                        (astrophys.Jy/astrophys.beam,
                         astrophys.Jy/Unit(beam_area))],
                       "beam_angular_area", {'beam_area': beam_area})


def thermodynamic_temperature(frequency, T_cmb=None):
    r"""Defines the conversion between Jy/sr and "thermodynamic temperature",
    :math:`T_{CMB}`, in Kelvins.  The thermodynamic temperature is a unit
    very commonly used in cosmology.  See eqn 8 in [1]_.

    :math:`K_{CMB} \equiv I_\nu / \left(2 k \nu^2 / c^2  f(\nu) \right)`

    with :math:`f(\nu) = \frac{ x^2 e^x}{(e^x - 1 )^2}`

    where :math:`x = h \nu / k T`

    Parameters
    ----------
    frequency : `~astropy.units.Quantity`
        The observed `spectral` equivalent `~astropy.units.Unit` (e.g.,
        frequency or wavelength).  Must have spectral units.
    T_cmb : `~astropy.units.Quantity` ['temperature'] or None
        The CMB temperature at z=0.  If `None`, the default cosmology will
        be used to get this temperature.  Must have units of temperature.

    Notes
    -----
    For broad band receivers, this conversion does not hold, as it depends
    strongly on the frequency.

    References
    ----------
    .. [1] Planck 2013 results. IX. HFI spectral response
       https://arxiv.org/abs/1303.5070

    Examples
    --------
    Planck HFI 143 GHz::

        >>> from astropy import units as u
        >>> from astropy.cosmology import Planck15
        >>> freq = 143 * u.GHz
        >>> equiv = u.thermodynamic_temperature(freq, Planck15.Tcmb0)
        >>> (1. * u.mK).to(u.MJy / u.sr, equivalencies=equiv)  # doctest: +FLOAT_CMP
        <Quantity 0.37993172 MJy / sr>

    """
    nu = frequency.to(si.GHz, spectral())

    if T_cmb is None:
        from astropy.cosmology import default_cosmology
        T_cmb = default_cosmology.get().Tcmb0

    def f(nu, T_cmb=T_cmb):
        x = _si.h * nu / _si.k_B / T_cmb
        return x**2 * np.exp(x) / np.expm1(x)**2

    def convert_Jy_to_K(x_jybm):
        factor = (f(nu) * 2 * _si.k_B * si.K * nu**2 / _si.c**2).to_value(astrophys.Jy)
        return x_jybm / factor

    def convert_K_to_Jy(x_K):
        factor = (astrophys.Jy / (f(nu) * 2 * _si.k_B * nu**2 / _si.c**2)).to_value(si.K)
        return x_K / factor

    return Equivalency([(astrophys.Jy/si.sr, si.K,
                         convert_Jy_to_K, convert_K_to_Jy)],
                       "thermodynamic_temperature",
                       {'frequency': frequency, "T_cmb": T_cmb})
""" from .imperial import deg_F, deg_R return Equivalency([ (si.K, si.deg_C, lambda x: x - 273.15, lambda x: x + 273.15), (si.deg_C, deg_F, lambda x: x * 1.8 + 32.0, lambda x: (x - 32.0) / 1.8), (si.K, deg_F, lambda x: (x - 273.15) * 1.8 + 32.0, lambda x: ((x - 32.0) / 1.8) + 273.15), (deg_R, deg_F, lambda x: x - 459.67, lambda x: x + 459.67), (deg_R, si.deg_C, lambda x: (x - 491.67) * (5/9), lambda x: x * 1.8 + 491.67), (deg_R, si.K, lambda x: x * (5/9), lambda x: x * 1.8)], "temperature") def temperature_energy(): """Convert between Kelvin and keV(eV) to an equivalent amount.""" return Equivalency([ (si.K, si.eV, lambda x: x / (_si.e.value / _si.k_B.value), lambda x: x * (_si.e.value / _si.k_B.value))], "temperature_energy") def assert_is_spectral_unit(value): try: value.to(si.Hz, spectral()) except (AttributeError, UnitsError) as ex: raise UnitsError("The 'rest' value must be a spectral equivalent " "(frequency, wavelength, or energy).") def pixel_scale(pixscale): """ Convert between pixel distances (in units of ``pix``) and other units, given a particular ``pixscale``. Parameters ---------- pixscale : `~astropy.units.Quantity` The pixel scale either in units of <unit>/pixel or pixel/<unit>. """ decomposed = pixscale.unit.decompose() dimensions = dict(zip(decomposed.bases, decomposed.powers)) pix_power = dimensions.get(misc.pix, 0) if pix_power == -1: physical_unit = Unit(pixscale * misc.pix) elif pix_power == 1: physical_unit = Unit(misc.pix / pixscale) else: raise UnitsError( "The pixel scale unit must have" " pixel dimensionality of 1 or -1.") return Equivalency([(misc.pix, physical_unit)], "pixel_scale", {'pixscale': pixscale}) def plate_scale(platescale): """ Convert between lengths (to be interpreted as lengths in the focal plane) and angular units with a specified ``platescale``. Parameters ---------- platescale : `~astropy.units.Quantity` The pixel scale either in units of distance/pixel or distance/angle. """ if platescale.unit.is_equivalent(si.arcsec/si.m): platescale_val = platescale.to_value(si.radian/si.m) elif platescale.unit.is_equivalent(si.m/si.arcsec): platescale_val = (1/platescale).to_value(si.radian/si.m) else: raise UnitsError("The pixel scale must be in angle/distance or " "distance/angle") return Equivalency([(si.m, si.radian, lambda d: d*platescale_val, lambda rad: rad/platescale_val)], "plate_scale", {'platescale': platescale}) # ------------------------------------------------------------------------- def __getattr__(attr): if attr == "with_H0": import warnings from astropy.cosmology.units import with_H0 from astropy.utils.exceptions import AstropyDeprecationWarning warnings.warn( ("`with_H0` is deprecated from `astropy.units.equivalencies` " "since astropy 5.0 and may be removed in a future version. " "Use `astropy.cosmology.units.with_H0` instead."), AstropyDeprecationWarning) return with_H0 raise AttributeError(f"module {__name__!r} has no attribute {attr!r}.")
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains convenience functions for retrieving solar system
ephemerides from jplephem.
"""

from urllib.parse import urlparse
import os.path
import re

import numpy as np
import erfa

from .sky_coordinate import SkyCoord
from astropy.utils.data import download_file
from astropy.utils.decorators import classproperty, deprecated
from astropy.utils.state import ScienceState
from astropy.utils import indent
from astropy import units as u
from astropy.constants import c as speed_of_light
from .representation import CartesianRepresentation, CartesianDifferential
from .builtin_frames import GCRS, ICRS, ITRS, TETE
from .builtin_frames.utils import get_jd12

__all__ = ["get_body", "get_moon", "get_body_barycentric",
           "get_body_barycentric_posvel", "solar_system_ephemeris"]


DEFAULT_JPL_EPHEMERIS = 'de430'

"""List of kernel pairs needed to calculate positions of a given object."""
BODY_NAME_TO_KERNEL_SPEC = {
    'sun': [(0, 10)],
    'mercury': [(0, 1), (1, 199)],
    'venus': [(0, 2), (2, 299)],
    'earth-moon-barycenter': [(0, 3)],
    'earth': [(0, 3), (3, 399)],
    'moon': [(0, 3), (3, 301)],
    'mars': [(0, 4)],
    'jupiter': [(0, 5)],
    'saturn': [(0, 6)],
    'uranus': [(0, 7)],
    'neptune': [(0, 8)],
    'pluto': [(0, 9)],
}

"""Indices to the plan94 routine for the given object."""
PLAN94_BODY_NAME_TO_PLANET_INDEX = {
    'mercury': 1,
    'venus': 2,
    'earth-moon-barycenter': 3,
    'mars': 4,
    'jupiter': 5,
    'saturn': 6,
    'uranus': 7,
    'neptune': 8,
}

_EPHEMERIS_NOTE = """
You can either give an explicit ephemeris or use a default, which is normally
a built-in ephemeris that does not require ephemeris files.  To change
the default to be the JPL ephemeris::

    >>> from astropy.coordinates import solar_system_ephemeris
    >>> solar_system_ephemeris.set('jpl')  # doctest: +SKIP

Use of any JPL ephemeris requires the jplephem package
(https://pypi.org/project/jplephem/).
If needed, the ephemeris file will be downloaded (and cached).

One can check which bodies are covered by a given ephemeris using::

    >>> solar_system_ephemeris.bodies
    ('earth', 'sun', 'moon', 'mercury', 'venus', 'earth-moon-barycenter',
     'mars', 'jupiter', 'saturn', 'uranus', 'neptune')
"""[1:-1]


class solar_system_ephemeris(ScienceState):
    """Default ephemerides for calculating positions of Solar-System bodies.

    This can be one of the following:

    - 'builtin': polynomial approximations to the orbital elements.
    - 'dexxx[s]', for a JPL dynamical model, where xxx is the three digit
      version number (e.g. de430), and the 's' is optional to specify the
      'small' version of a kernel.  The version number must correspond to an
      ephemeris file available at:
      https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/
    - 'jpl': Alias for the default JPL ephemeris (currently, 'de430').
    - URL: (str) The url to a SPK ephemeris in SPICE binary (.bsp) format.
    - PATH: (str) File path to a SPK ephemeris in SPICE binary (.bsp) format.
    - `None`: Ensure an Exception is raised without an explicit ephemeris.

    The default is 'builtin', which uses the ``epv00`` and ``plan94``
    routines from the ``erfa`` implementation of the Standards Of Fundamental
    Astronomy library.

    Notes
    -----
    Any file required will be downloaded (and cached) when the state is set.
    The default Satellite Planet Kernel (SPK) file from NASA JPL (de430) is
    ~120MB, and covers years ~1550-2650 CE [1]_.  The smaller de432s file is
    ~10MB, and covers years 1950-2050 [2]_ (and similarly for the newer de440
    and de440s).
    Older versions of the JPL ephemerides (such as the widely used de200)
    can be used via their URL [3]_.

    .. [1] https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/aareadme_de430-de431.txt
    .. [2] https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/aareadme_de432s.txt
    .. [3] https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/a_old_versions/
    """
    _value = 'builtin'
    _kernel = None

    @classmethod
    def validate(cls, value):
        # make no changes if value is None
        if value is None:
            return cls._value

        # Set up Kernel; if the file is not in cache, this will download it.
        cls.get_kernel(value)
        return value

    @classmethod
    def get_kernel(cls, value):
        # ScienceState only ensures the `_value` attribute is up to date,
        # so we need to be sure any kernel returned is consistent.
        if cls._kernel is None or cls._kernel.origin != value:
            if cls._kernel is not None:
                cls._kernel.daf.file.close()
                cls._kernel = None
            kernel = _get_kernel(value)
            if kernel is not None:
                kernel.origin = value
            cls._kernel = kernel
        return cls._kernel

    @classproperty
    def kernel(cls):
        return cls.get_kernel(cls._value)

    @classproperty
    def bodies(cls):
        if cls._value is None:
            return None
        if cls._value.lower() == 'builtin':
            return (('earth', 'sun', 'moon') +
                    tuple(PLAN94_BODY_NAME_TO_PLANET_INDEX.keys()))
        else:
            return tuple(BODY_NAME_TO_KERNEL_SPEC.keys())


def _get_kernel(value):
    """
    Try importing jplephem, download/retrieve from cache the Satellite Planet
    Kernel corresponding to the given ephemeris.
    """
    if value is None or value.lower() == 'builtin':
        return None

    try:
        from jplephem.spk import SPK
    except ImportError:
        raise ImportError("Solar system JPL ephemeris calculations require "
                          "the jplephem package "
                          "(https://pypi.org/project/jplephem/)")

    if value.lower() == 'jpl':
        # Get the default JPL ephemeris URL
        value = DEFAULT_JPL_EPHEMERIS

    if re.compile(r'de[0-9][0-9][0-9]s?').match(value.lower()):
        value = ('https://naif.jpl.nasa.gov/pub/naif/generic_kernels'
                 '/spk/planets/{:s}.bsp'.format(value.lower()))
    elif os.path.isfile(value):
        return SPK.open(value)
    else:
        try:
            urlparse(value)
        except Exception:
            raise ValueError('{} was not one of the standard strings and '
                             'could not be parsed as a file path or URL'
                             .format(value))

    return SPK.open(download_file(value, cache=True))


def _get_body_barycentric_posvel(body, time, ephemeris=None,
                                 get_velocity=True):
    """Calculate the barycentric position (and velocity) of a solar system
    body.

    Parameters
    ----------
    body : str or other
        The solar system body for which to calculate positions.  Can also be
        a kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
        kernel.
    time : `~astropy.time.Time`
        Time of observation.
    ephemeris : str, optional
        Ephemeris to use.  By default, use the one set with
        ``astropy.coordinates.solar_system_ephemeris.set``
    get_velocity : bool, optional
        Whether or not to calculate the velocity as well as the position.

    Returns
    -------
    position : `~astropy.coordinates.CartesianRepresentation` or tuple
        Barycentric (ICRS) position or tuple of position and velocity.

    Notes
    -----
    Whether or not velocities are calculated makes little difference for the
    built-in ephemerides, but for most JPL ephemeris files, the execution
    time roughly doubles.
    """
    # If the ephemeris is to be taken from solar_system_ephemeris, or the one
    # it already contains, use the kernel there.  Otherwise, open the
    # ephemeris, possibly downloading it, but make sure the file is closed
    # at the end.
    default_kernel = ephemeris is None or ephemeris is solar_system_ephemeris._value
    kernel = None
    try:
        if default_kernel:
            if solar_system_ephemeris.get() is None:
                raise ValueError(_EPHEMERIS_NOTE)
            kernel = solar_system_ephemeris.kernel
        else:
            kernel = _get_kernel(ephemeris)

        jd1, jd2 = get_jd12(time, 'tdb')
        if kernel is None:
            body = body.lower()
            earth_pv_helio, earth_pv_bary = erfa.epv00(jd1, jd2)
            if body == 'earth':
                body_pv_bary = earth_pv_bary

            elif body == 'moon':
                # The moon98 documentation notes that it takes TT, but that TDB leads
                # to errors smaller than the uncertainties in the algorithm.
                # moon98 returns the astrometric position relative to the Earth.
                moon_pv_geo = erfa.moon98(jd1, jd2)
                body_pv_bary = erfa.pvppv(moon_pv_geo, earth_pv_bary)
            else:
                sun_pv_bary = erfa.pvmpv(earth_pv_bary, earth_pv_helio)
                if body == 'sun':
                    body_pv_bary = sun_pv_bary
                else:
                    try:
                        body_index = PLAN94_BODY_NAME_TO_PLANET_INDEX[body]
                    except KeyError:
                        raise KeyError("{}'s position and velocity cannot be "
                                       "calculated with the '{}' ephemeris."
                                       .format(body, ephemeris))
                    body_pv_helio = erfa.plan94(jd1, jd2, body_index)
                    body_pv_bary = erfa.pvppv(body_pv_helio, sun_pv_bary)

            body_pos_bary = CartesianRepresentation(
                body_pv_bary['p'], unit=u.au, xyz_axis=-1, copy=False)
            if get_velocity:
                body_vel_bary = CartesianRepresentation(
                    body_pv_bary['v'], unit=u.au/u.day, xyz_axis=-1,
                    copy=False)

        else:
            if isinstance(body, str):
                # Look up kernel chain for JPL ephemeris, based on name
                try:
                    kernel_spec = BODY_NAME_TO_KERNEL_SPEC[body.lower()]
                except KeyError:
                    raise KeyError("{}'s position cannot be calculated with "
                                   "the {} ephemeris.".format(body, ephemeris))
            else:
                # otherwise, assume the user knows what they're doing and
                # intentionally passed in a kernel chain
                kernel_spec = body

            # jplephem cannot handle multi-D arrays, so convert to 1D here.
            jd1_shape = getattr(jd1, 'shape', ())
            if len(jd1_shape) > 1:
                jd1, jd2 = jd1.ravel(), jd2.ravel()
            # Note that we use the new jd1.shape here to create a 1D result array.
            # It is reshaped below.
            body_posvel_bary = np.zeros((2 if get_velocity else 1, 3) +
                                        getattr(jd1, 'shape', ()))
            for pair in kernel_spec:
                spk = kernel[pair]
                if spk.data_type == 3:
                    # Type 3 kernels contain both position and velocity.
                    posvel = spk.compute(jd1, jd2)
                    if get_velocity:
                        body_posvel_bary += posvel.reshape(body_posvel_bary.shape)
                    else:
                        # The position is the first three components.
                        body_posvel_bary[0] += posvel[:3]
                else:
                    # spk.generate first yields the position and then the
                    # derivative. If no velocities are desired, body_posvel_bary
                    # has only one element and thus the loop ends after a single
                    # iteration, avoiding the velocity calculation.
                    for body_p_or_v, p_or_v in zip(body_posvel_bary,
                                                   spk.generate(jd1, jd2)):
                        body_p_or_v += p_or_v

            body_posvel_bary.shape = body_posvel_bary.shape[:2] + jd1_shape
            body_pos_bary = CartesianRepresentation(body_posvel_bary[0],
                                                    unit=u.km, copy=False)
            if get_velocity:
                body_vel_bary = CartesianRepresentation(body_posvel_bary[1],
                                                        unit=u.km/u.day,
                                                        copy=False)

        return (body_pos_bary, body_vel_bary) if get_velocity else body_pos_bary

    finally:
        if not default_kernel and kernel is not None:
            kernel.daf.file.close()


def get_body_barycentric_posvel(body, time, ephemeris=None):
    """Calculate the barycentric position and velocity of a solar system body.

    Parameters
    ----------
    body : str or list of tuple
        The solar system body for which to calculate positions.  Can
        also be a kernel specifier (list of 2-tuples) if the ``ephemeris``
        is a JPL kernel.
    time : `~astropy.time.Time`
        Time of observation.
    ephemeris : str, optional
        Ephemeris to use.
By default, use the one set with ``astropy.coordinates.solar_system_ephemeris.set`` Returns ------- position, velocity : tuple of `~astropy.coordinates.CartesianRepresentation` Tuple of barycentric (ICRS) position and velocity. See Also -------- get_body_barycentric : to calculate position only. This is faster by about a factor two for JPL kernels, but has no speed advantage for the built-in ephemeris. Notes ----- {_EPHEMERIS_NOTE} """ return _get_body_barycentric_posvel(body, time, ephemeris) def get_body_barycentric(body, time, ephemeris=None): """Calculate the barycentric position of a solar system body. Parameters ---------- body : str or list of tuple The solar system body for which to calculate positions. Can also be a kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL kernel. time : `~astropy.time.Time` Time of observation. ephemeris : str, optional Ephemeris to use. By default, use the one set with ``astropy.coordinates.solar_system_ephemeris.set`` Returns ------- position : `~astropy.coordinates.CartesianRepresentation` Barycentric (ICRS) position of the body in cartesian coordinates See Also -------- get_body_barycentric_posvel : to calculate both position and velocity. Notes ----- {_EPHEMERIS_NOTE} """ return _get_body_barycentric_posvel(body, time, ephemeris, get_velocity=False) def _get_apparent_body_position(body, time, ephemeris, obsgeoloc=None): """Calculate the apparent position of body ``body`` relative to Earth. This corrects for the light-travel time to the object. Parameters ---------- body : str or other The solar system body for which to calculate positions. Can also be a kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL kernel. time : `~astropy.time.Time` Time of observation. ephemeris : str, optional Ephemeris to use. By default, use the one set with ``~astropy.coordinates.solar_system_ephemeris.set`` obsgeoloc : `~astropy.coordinates.CartesianRepresentation`, optional The GCRS position of the observer Returns ------- cartesian_position : `~astropy.coordinates.CartesianRepresentation` Barycentric (ICRS) apparent position of the body in cartesian coordinates Notes ----- {_EPHEMERIS_NOTE} """ if ephemeris is None: ephemeris = solar_system_ephemeris.get() # Calculate position given approximate light travel time. delta_light_travel_time = 20. * u.s emitted_time = time light_travel_time = 0. * u.s earth_loc = get_body_barycentric('earth', time, ephemeris) if obsgeoloc is not None: earth_loc += obsgeoloc while np.any(np.fabs(delta_light_travel_time) > 1.0e-8*u.s): body_loc = get_body_barycentric(body, emitted_time, ephemeris) earth_distance = (body_loc - earth_loc).norm() delta_light_travel_time = (light_travel_time - earth_distance/speed_of_light) light_travel_time = earth_distance/speed_of_light emitted_time = time - light_travel_time return get_body_barycentric(body, emitted_time, ephemeris) def get_body(body, time, location=None, ephemeris=None): """ Get a `~astropy.coordinates.SkyCoord` for a solar system body as observed from a location on Earth in the `~astropy.coordinates.GCRS` reference system. Parameters ---------- body : str or list of tuple The solar system body for which to calculate positions. Can also be a kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL kernel. time : `~astropy.time.Time` Time of observation. location : `~astropy.coordinates.EarthLocation`, optional Location of observer on the Earth. If not given, will be taken from ``time`` (if not present, a geocentric observer will be assumed). 
ephemeris : str, optional Ephemeris to use. If not given, use the one set with ``astropy.coordinates.solar_system_ephemeris.set`` (which is set to 'builtin' by default). Returns ------- skycoord : `~astropy.coordinates.SkyCoord` GCRS Coordinate for the body Notes ----- The coordinate returned is the apparent position, which is the position of the body at time *t* minus the light travel time from the *body* to the observing *location*. {_EPHEMERIS_NOTE} """ if location is None: location = time.location if location is not None: obsgeoloc, obsgeovel = location.get_gcrs_posvel(time) else: obsgeoloc, obsgeovel = None, None cartrep = _get_apparent_body_position(body, time, ephemeris, obsgeoloc) icrs = ICRS(cartrep) gcrs = icrs.transform_to(GCRS(obstime=time, obsgeoloc=obsgeoloc, obsgeovel=obsgeovel)) return SkyCoord(gcrs) def get_moon(time, location=None, ephemeris=None): """ Get a `~astropy.coordinates.SkyCoord` for the Earth's Moon as observed from a location on Earth in the `~astropy.coordinates.GCRS` reference system. Parameters ---------- time : `~astropy.time.Time` Time of observation location : `~astropy.coordinates.EarthLocation` Location of observer on the Earth. If none is supplied, taken from ``time`` (if not present, a geocentric observer will be assumed). ephemeris : str, optional Ephemeris to use. If not given, use the one set with ``astropy.coordinates.solar_system_ephemeris.set`` (which is set to 'builtin' by default). Returns ------- skycoord : `~astropy.coordinates.SkyCoord` GCRS Coordinate for the Moon Notes ----- The coordinate returned is the apparent position, which is the position of the moon at time *t* minus the light travel time from the moon to the observing *location*. {_EPHEMERIS_NOTE} """ return get_body('moon', time, location=location, ephemeris=ephemeris) # Add note about the ephemeris choices to the docstrings of relevant functions. # Note: sadly, one cannot use f-strings for docstrings, so we format explicitly. for f in [f for f in locals().values() if callable(f) and f.__doc__ is not None and '{_EPHEMERIS_NOTE}' in f.__doc__]: f.__doc__ = f.__doc__.format(_EPHEMERIS_NOTE=indent(_EPHEMERIS_NOTE)[4:]) deprecation_msg = """ The use of _apparent_position_in_true_coordinates is deprecated because astropy now implements a True Equator True Equinox Frame (TETE), which should be used instead. """ @deprecated('4.2', deprecation_msg) def _apparent_position_in_true_coordinates(skycoord): """ Convert Skycoord in GCRS frame into one in which RA and Dec are defined w.r.t to the true equinox and poles of the Earth """ location = getattr(skycoord, 'location', None) if location is None: gcrs_rep = skycoord.obsgeoloc.with_differentials( {'s': CartesianDifferential.from_cartesian(skycoord.obsgeovel)}) location = (GCRS(gcrs_rep, obstime=skycoord.obstime) .transform_to(ITRS(obstime=skycoord.obstime)) .earth_location) tete_frame = TETE(obstime=skycoord.obstime, location=location) return skycoord.transform_to(tete_frame)
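# Illustrative usage (editor's sketch, not part of the original module):
# a minimal demonstration of the public API above, using the 'builtin'
# ephemeris so that no kernel download is required.  The date is arbitrary.
if __name__ == '__main__':
    from astropy.time import Time

    t = Time('2021-06-01T00:00:00')
    with solar_system_ephemeris.set('builtin'):
        # Barycentric (ICRS) position only; for JPL kernels this is roughly
        # twice as fast as also requesting the velocity.
        pos = get_body_barycentric('jupiter', t)
        # Apparent GCRS coordinates for a geocentric observer (no location).
        jup = get_body('jupiter', t)
    print(pos)
    print(jup.ra.deg, jup.dec.deg)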
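# Editor's sketch (not part of the original module): the light-travel-time
# iteration performed by _get_apparent_body_position, written out explicitly
# for the Moon and a geocentric observer.  The emission time is retarded
# until the travel time converges to ~1e-8 s; the date is arbitrary.
if __name__ == '__main__':
    from astropy.time import Time

    t = Time('2021-06-01T00:00:00')
    earth_loc = get_body_barycentric('earth', t)
    emitted_time = t
    light_travel_time = 0. * u.s
    delta_light_travel_time = 1. * u.s
    while np.any(np.fabs(delta_light_travel_time) > 1.0e-8 * u.s):
        body_loc = get_body_barycentric('moon', emitted_time)
        earth_distance = (body_loc - earth_loc).norm()
        delta_light_travel_time = (light_travel_time -
                                   earth_distance / speed_of_light)
        light_travel_time = earth_distance / speed_of_light
        emitted_time = t - light_travel_time
    print('light travel time to the Moon:', light_travel_time.to(u.s))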
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst

# This module includes files automatically generated from ply (these end in
# _lextab.py and _parsetab.py). To generate these files, remove them from this
# folder, then build astropy and run the tests in-place:
#
#   python setup.py build_ext --inplace
#   pytest astropy/coordinates
#
# You can then commit the changes to the re-generated _lextab.py and
# _parsetab.py files.

"""
This module contains formatting functions that are for internal use in
astropy.coordinates.angles. Mainly they are conversions from one format
of data to another.
"""

import os
import threading
from warnings import warn

import numpy as np

from .errors import (IllegalHourWarning, IllegalHourError,
                     IllegalMinuteWarning, IllegalMinuteError,
                     IllegalSecondWarning, IllegalSecondError)
from astropy.utils import format_exception, parsing
from astropy.utils.decorators import deprecated
from astropy import units as u


class _AngleParser:
    """
    Parses the various angle formats including:

       * 01:02:30.43 degrees
       * 1 2 0 hours
       * 1°2′3″
       * 1d2m3s
       * -1h2m3s
       * 1°2′3″N

    This class should not be used directly.  Use `parse_angle`
    instead.
    """

    # For safe multi-threaded operation all class (but not instance)
    # members that carry state should be thread-local. They are stored
    # in the following class member
    _thread_local = threading.local()

    def __init__(self):
        # TODO: in principle, the parser should be invalidated if we change unit
        # system (from CDS to FITS, say).  Might want to keep a link to the
        # unit_registry used, and regenerate the parser/lexer if it changes.
        # Alternatively, perhaps one should not worry at all and just pre-
        # generate the parser for each release (as done for unit formats).
        # For some discussion of this problem, see
        # https://github.com/astropy/astropy/issues/5350#issuecomment-248770151
        if '_parser' not in _AngleParser._thread_local.__dict__:
            (_AngleParser._thread_local._parser,
             _AngleParser._thread_local._lexer) = self._make_parser()

    @classmethod
    def _get_simple_unit_names(cls):
        simple_units = set(
            u.radian.find_equivalent_units(include_prefix_units=True))
        simple_unit_names = set()
        # We filter out degree and hourangle, since those are treated
        # separately.
        for unit in simple_units:
            if unit != u.deg and unit != u.hourangle:
                simple_unit_names.update(unit.names)
        return sorted(simple_unit_names)

    @classmethod
    def _make_parser(cls):
        from astropy.extern.ply import lex, yacc

        # List of token names.
        tokens = (
            'SIGN',
            'UINT',
            'UFLOAT',
            'COLON',
            'DEGREE',
            'HOUR',
            'MINUTE',
            'SECOND',
            'SIMPLE_UNIT',
            'EASTWEST',
            'NORTHSOUTH'
        )

        # NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
        # Regular expression rules for simple tokens
        def t_UFLOAT(t):
            r'((\d+\.\d*)|(\.\d+))([eE][+−-]?\d+)?'
            # The above includes Unicode "MINUS SIGN" \u2212.  It is
            # important to include the hyphen last, or the regex will
            # treat this as a range.
            t.value = float(t.value.replace('−', '-'))
            return t

        def t_UINT(t):
            r'\d+'
            t.value = int(t.value)
            return t

        def t_SIGN(t):
            r'[+−-]'
            # The above includes Unicode "MINUS SIGN" \u2212.  It is
            # important to include the hyphen last, or the regex will
            # treat this as a range.
if t.value == '+': t.value = 1.0 else: t.value = -1.0 return t def t_EASTWEST(t): r'[EW]$' t.value = -1.0 if t.value == 'W' else 1.0 return t def t_NORTHSOUTH(t): r'[NS]$' # We cannot use lower-case letters otherwise we'll confuse # s[outh] with s[econd] t.value = -1.0 if t.value == 'S' else 1.0 return t def t_SIMPLE_UNIT(t): t.value = u.Unit(t.value) return t t_SIMPLE_UNIT.__doc__ = '|'.join( f'(?:{x})' for x in cls._get_simple_unit_names()) t_COLON = ':' t_DEGREE = r'd(eg(ree(s)?)?)?|°' t_HOUR = r'hour(s)?|h(r)?|ʰ' t_MINUTE = r'm(in(ute(s)?)?)?|′|\'|ᵐ' t_SECOND = r's(ec(ond(s)?)?)?|″|\"|ˢ' # A string containing ignored characters (spaces) t_ignore = ' ' # Error handling rule def t_error(t): raise ValueError( f"Invalid character at col {t.lexpos}") lexer = parsing.lex(lextab='angle_lextab', package='astropy/coordinates') def p_angle(p): ''' angle : sign hms eastwest | sign dms dir | sign arcsecond dir | sign arcminute dir | sign simple dir ''' sign = p[1] * p[3] value, unit = p[2] if isinstance(value, tuple): p[0] = ((sign * value[0],) + value[1:], unit) else: p[0] = (sign * value, unit) def p_sign(p): ''' sign : SIGN | ''' if len(p) == 2: p[0] = p[1] else: p[0] = 1.0 def p_eastwest(p): ''' eastwest : EASTWEST | ''' if len(p) == 2: p[0] = p[1] else: p[0] = 1.0 def p_dir(p): ''' dir : EASTWEST | NORTHSOUTH | ''' if len(p) == 2: p[0] = p[1] else: p[0] = 1.0 def p_ufloat(p): ''' ufloat : UFLOAT | UINT ''' p[0] = p[1] def p_colon(p): ''' colon : UINT COLON ufloat | UINT COLON UINT COLON ufloat ''' if len(p) == 4: p[0] = (p[1], p[3]) elif len(p) == 6: p[0] = (p[1], p[3], p[5]) def p_spaced(p): ''' spaced : UINT ufloat | UINT UINT ufloat ''' if len(p) == 3: p[0] = (p[1], p[2]) elif len(p) == 4: p[0] = (p[1], p[2], p[3]) def p_generic(p): ''' generic : colon | spaced | ufloat ''' p[0] = p[1] def p_hms(p): ''' hms : UINT HOUR | UINT HOUR ufloat | UINT HOUR UINT MINUTE | UINT HOUR UFLOAT MINUTE | UINT HOUR UINT MINUTE ufloat | UINT HOUR UINT MINUTE ufloat SECOND | generic HOUR ''' if len(p) == 3: p[0] = (p[1], u.hourangle) elif len(p) in (4, 5): p[0] = ((p[1], p[3]), u.hourangle) elif len(p) in (6, 7): p[0] = ((p[1], p[3], p[5]), u.hourangle) def p_dms(p): ''' dms : UINT DEGREE | UINT DEGREE ufloat | UINT DEGREE UINT MINUTE | UINT DEGREE UFLOAT MINUTE | UINT DEGREE UINT MINUTE ufloat | UINT DEGREE UINT MINUTE ufloat SECOND | generic DEGREE ''' if len(p) == 3: p[0] = (p[1], u.degree) elif len(p) in (4, 5): p[0] = ((p[1], p[3]), u.degree) elif len(p) in (6, 7): p[0] = ((p[1], p[3], p[5]), u.degree) def p_simple(p): ''' simple : generic | generic SIMPLE_UNIT ''' if len(p) == 2: p[0] = (p[1], None) else: p[0] = (p[1], p[2]) def p_arcsecond(p): ''' arcsecond : generic SECOND ''' p[0] = (p[1], u.arcsecond) def p_arcminute(p): ''' arcminute : generic MINUTE ''' p[0] = (p[1], u.arcminute) def p_error(p): raise ValueError parser = parsing.yacc(tabmodule='angle_parsetab', package='astropy/coordinates') return parser, lexer def parse(self, angle, unit, debug=False): try: found_angle, found_unit = self._thread_local._parser.parse( angle, lexer=self._thread_local._lexer, debug=debug) except ValueError as e: if str(e): raise ValueError(f"{str(e)} in angle {angle!r}") from e else: raise ValueError( f"Syntax error parsing angle {angle!r}") from e if unit is None and found_unit is None: raise u.UnitsError("No unit specified") return found_angle, found_unit def _check_hour_range(hrs): """ Checks that the given value is in the range (-24, 24). 
""" if np.any(np.abs(hrs) == 24.): warn(IllegalHourWarning(hrs, 'Treating as 24 hr')) elif np.any(hrs < -24.) or np.any(hrs > 24.): raise IllegalHourError(hrs) def _check_minute_range(m): """ Checks that the given value is in the range [0,60]. If the value is equal to 60, then a warning is raised. """ if np.any(m == 60.): warn(IllegalMinuteWarning(m, 'Treating as 0 min, +1 hr/deg')) elif np.any(m < -60.) or np.any(m > 60.): # "Error: minutes not in range [-60,60) ({0}).".format(min)) raise IllegalMinuteError(m) def _check_second_range(sec): """ Checks that the given value is in the range [0,60]. If the value is equal to 60, then a warning is raised. """ if np.any(sec == 60.): warn(IllegalSecondWarning(sec, 'Treating as 0 sec, +1 min')) elif sec is None: pass elif np.any(sec < -60.) or np.any(sec > 60.): # "Error: seconds not in range [-60,60) ({0}).".format(sec)) raise IllegalSecondError(sec) def check_hms_ranges(h, m, s): """ Checks that the given hour, minute and second are all within reasonable range. """ _check_hour_range(h) _check_minute_range(m) _check_second_range(s) return None def parse_angle(angle, unit=None, debug=False): """ Parses an input string value into an angle value. Parameters ---------- angle : str A string representing the angle. May be in one of the following forms: * 01:02:30.43 degrees * 1 2 0 hours * 1°2′3″ * 1d2m3s * -1h2m3s unit : `~astropy.units.UnitBase` instance, optional The unit used to interpret the string. If ``unit`` is not provided, the unit must be explicitly represented in the string, either at the end or as number separators. debug : bool, optional If `True`, print debugging information from the parser. Returns ------- value, unit : tuple ``value`` is the value as a floating point number or three-part tuple, and ``unit`` is a `Unit` instance which is either the unit passed in or the one explicitly mentioned in the input string. """ return _AngleParser().parse(angle, unit, debug=debug) def degrees_to_dms(d): """ Convert a floating-point degree value into a ``(degree, arcminute, arcsecond)`` tuple. """ sign = np.copysign(1.0, d) (df, d) = np.modf(np.abs(d)) # (degree fraction, degree) (mf, m) = np.modf(df * 60.) # (minute fraction, minute) s = mf * 60. return np.floor(sign * d), sign * np.floor(m), sign * s @deprecated("dms_to_degrees (or creating an Angle with a tuple) has ambiguous " "behavior when the degree value is 0", alternative="another way of creating angles instead (e.g. a less " "ambiguous string like '-0d1m2.3s'") def dms_to_degrees(d, m, s=None): """ Convert degrees, arcminute, arcsecond to a float degrees value. """ _check_minute_range(m) _check_second_range(s) # determine sign sign = np.copysign(1.0, d) try: d = np.floor(np.abs(d)) if s is None: m = np.abs(m) s = 0 else: m = np.floor(np.abs(m)) s = np.abs(s) except ValueError as err: raise ValueError(format_exception( "{func}: dms values ({1[0]},{2[1]},{3[2]}) could not be " "converted to numbers.", d, m, s)) from err return sign * (d + m / 60. + s / 3600.) @deprecated("hms_to_hours (or creating an Angle with a tuple) has ambiguous " "behavior when the hour value is 0", alternative="another way of creating angles instead (e.g. a less " "ambiguous string like '-0h1m2.3s'") def hms_to_hours(h, m, s=None): """ Convert hour, minute, second to a float hour value. 
""" check_hms_ranges(h, m, s) # determine sign sign = np.copysign(1.0, h) try: h = np.floor(np.abs(h)) if s is None: m = np.abs(m) s = 0 else: m = np.floor(np.abs(m)) s = np.abs(s) except ValueError as err: raise ValueError(format_exception( "{func}: HMS values ({1[0]},{2[1]},{3[2]}) could not be " "converted to numbers.", h, m, s)) from err return sign * (h + m / 60. + s / 3600.) def hms_to_degrees(h, m, s): """ Convert hour, minute, second to a float degrees value. """ return hms_to_hours(h, m, s) * 15. def hms_to_radians(h, m, s): """ Convert hour, minute, second to a float radians value. """ return u.degree.to(u.radian, hms_to_degrees(h, m, s)) def hms_to_dms(h, m, s): """ Convert degrees, arcminutes, arcseconds to an ``(hour, minute, second)`` tuple. """ return degrees_to_dms(hms_to_degrees(h, m, s)) def hours_to_decimal(h): """ Convert any parseable hour value into a float value. """ from . import angles return angles.Angle(h, unit=u.hourangle).hour def hours_to_radians(h): """ Convert an angle in Hours to Radians. """ return u.hourangle.to(u.radian, h) def hours_to_hms(h): """ Convert an floating-point hour value into an ``(hour, minute, second)`` tuple. """ sign = np.copysign(1.0, h) (hf, h) = np.modf(np.abs(h)) # (degree fraction, degree) (mf, m) = np.modf(hf * 60.0) # (minute fraction, minute) s = mf * 60.0 return (np.floor(sign * h), sign * np.floor(m), sign * s) def radians_to_degrees(r): """ Convert an angle in Radians to Degrees. """ return u.radian.to(u.degree, r) def radians_to_hours(r): """ Convert an angle in Radians to Hours. """ return u.radian.to(u.hourangle, r) def radians_to_hms(r): """ Convert an angle in Radians to an ``(hour, minute, second)`` tuple. """ hours = radians_to_hours(r) return hours_to_hms(hours) def radians_to_dms(r): """ Convert an angle in Radians to an ``(degree, arcminute, arcsecond)`` tuple. """ degrees = u.radian.to(u.degree, r) return degrees_to_dms(degrees) def sexagesimal_to_string(values, precision=None, pad=False, sep=(':',), fields=3): """ Given an already separated tuple of sexagesimal values, returns a string. See `hours_to_string` and `degrees_to_string` for a higher-level interface to this functionality. """ # Check to see if values[0] is negative, using np.copysign to handle -0 sign = np.copysign(1.0, values[0]) # If the coordinates are negative, we need to take the absolute values. # We use np.abs because abs(-0) is -0 # TODO: Is this true? (MHvK, 2018-02-01: not on my system) values = [np.abs(value) for value in values] if pad: if sign == -1: pad = 3 else: pad = 2 else: pad = 0 if not isinstance(sep, tuple): sep = tuple(sep) if fields < 1 or fields > 3: raise ValueError( "fields must be 1, 2, or 3") if not sep: # empty string, False, or None, etc. sep = ('', '', '') elif len(sep) == 1: if fields == 3: sep = sep + (sep[0], '') elif fields == 2: sep = sep + ('', '') else: sep = ('', '', '') elif len(sep) == 2: sep = sep + ('',) elif len(sep) != 3: raise ValueError( "Invalid separator specification for converting angle to string.") # Simplify the expression based on the requested precision. For # example, if the seconds will round up to 60, we should convert # it to 0 and carry upwards. If the field is hidden (by the # fields kwarg) we round up around the middle, 30.0. 
    if precision is None:
        rounding_thresh = 60.0 - (10.0 ** -8)
    else:
        rounding_thresh = 60.0 - (10.0 ** -precision)

    if fields == 3 and values[2] >= rounding_thresh:
        values[2] = 0.0
        values[1] += 1.0
    elif fields < 3 and values[2] >= 30.0:
        values[1] += 1.0

    if fields >= 2 and values[1] >= 60.0:
        values[1] = 0.0
        values[0] += 1.0
    elif fields < 2 and values[1] >= 30.0:
        values[0] += 1.0

    literal = []
    last_value = ''
    literal.append('{0:0{pad}.0f}{sep[0]}')
    if fields >= 2:
        literal.append('{1:02d}{sep[1]}')
    if fields == 3:
        if precision is None:
            last_value = f'{abs(values[2]):.8f}'
            last_value = last_value.rstrip('0').rstrip('.')
        else:
            last_value = '{0:.{precision}f}'.format(
                abs(values[2]), precision=precision)
        if len(last_value) == 1 or last_value[1] == '.':
            last_value = '0' + last_value
        literal.append('{last_value}{sep[2]}')
    literal = ''.join(literal)
    return literal.format(np.copysign(values[0], sign),
                          int(values[1]), values[2],
                          sep=sep, pad=pad,
                          last_value=last_value)


def hours_to_string(h, precision=5, pad=False, sep=('h', 'm', 's'),
                    fields=3):
    """
    Takes a decimal hour value and returns a string formatted as hms with
    separator specified by the 'sep' parameter.

    ``h`` must be a scalar.
    """
    h, m, s = hours_to_hms(h)
    return sexagesimal_to_string((h, m, s), precision=precision, pad=pad,
                                 sep=sep, fields=fields)


def degrees_to_string(d, precision=5, pad=False, sep=':', fields=3):
    """
    Takes a decimal degree value and returns a string formatted as dms with
    separator specified by the 'sep' parameter.

    ``d`` must be a scalar.
    """
    d, m, s = degrees_to_dms(d)
    return sexagesimal_to_string((d, m, s), precision=precision, pad=pad,
                                 sep=sep, fields=fields)
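# Editor's sketch (not part of the original module): parsing a sexagesimal
# string and formatting a decimal value back, using the helpers above.  The
# numbers are arbitrary.
if __name__ == '__main__':
    # parse_angle returns the raw (d, m, s) tuple plus the unit it found.
    value, unit = parse_angle('1d2m3.4s')
    print(value, unit)               # (1.0, 2, 3.4) deg
    # Decimal degrees -> signed (d, m, s) tuple -> formatted string.
    print(degrees_to_dms(-12.3456))  # approximately (-12.0, -20.0, -44.16)
    print(degrees_to_string(-12.3456, sep='dms', precision=2))  # -12d20m44.16s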
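# Editor's sketch (not part of the original module): the carry logic of
# sexagesimal_to_string in action.  At precision=3 the seconds round up to
# 60 and propagate through the minutes into the hours field; at precision=9
# they do not.
if __name__ == '__main__':
    print(hours_to_string(9.999999999, precision=3))  # 10h00m00.000s
    print(hours_to_string(9.999999999, precision=9))  # 9h59m59.999... (no carry)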
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Currently the only site accessible without internet access is the Royal
Greenwich Observatory, as an example (and for testing purposes).  In future
releases, a canonical set of sites may be bundled into astropy for when the
online registry is unavailable.

Additions or corrections to the observatory list can be submitted via Pull
Request to the astropy-data GitHub repository
(https://github.com/astropy/astropy-data), updating the ``location.json``
file.
"""

import json
from difflib import get_close_matches
from collections.abc import Mapping

from astropy.utils.data import get_pkg_data_contents, get_file_contents
from .earth import EarthLocation
from .errors import UnknownSiteException
from astropy import units as u


class SiteRegistry(Mapping):
    """
    A bare-bones registry of EarthLocation objects.

    This acts as a mapping (dict-like object) but with the important
    caveat that it always transforms its inputs to lower-case.  So keys
    are always all lower-case, and even if you ask for something that's
    got mixed case, it will be interpreted as the all lower-case version.
    """

    def __init__(self):
        # the keys to this are always lower-case
        self._lowercase_names_to_locations = {}
        # these can be whatever case is appropriate
        self._names = []

    def __getitem__(self, site_name):
        """
        Returns an EarthLocation for a known site in this registry.

        Parameters
        ----------
        site_name : str
            Name of the observatory (case-insensitive).

        Returns
        -------
        site : `~astropy.coordinates.EarthLocation`
            The location of the observatory.
        """
        if site_name.lower() not in self._lowercase_names_to_locations:
            # If site name not found, find close matches and suggest them in error
            close_names = get_close_matches(site_name,
                                            self._lowercase_names_to_locations)
            close_names = sorted(close_names, key=len)
            raise UnknownSiteException(site_name, "the 'names' attribute",
                                       close_names=close_names)

        return self._lowercase_names_to_locations[site_name.lower()]

    def __len__(self):
        return len(self._lowercase_names_to_locations)

    def __iter__(self):
        return iter(self._lowercase_names_to_locations)

    def __contains__(self, site_name):
        return site_name.lower() in self._lowercase_names_to_locations

    @property
    def names(self):
        """
        The names in this registry.  Note that these are *not* exactly
        the same as the keys: keys are always lower-case, while `names`
        is what you should use for the actual readable names (which may
        be case-sensitive).

        Returns
        -------
        site : list of str
            The names of the sites in this registry
        """
        return sorted(self._names)

    def add_site(self, names, locationobj):
        """
        Adds a location to the registry.
Parameters ---------- names : list of str All the names this site should go under locationobj : `~astropy.coordinates.EarthLocation` The actual site object """ for name in names: self._lowercase_names_to_locations[name.lower()] = locationobj self._names.append(name) @classmethod def from_json(cls, jsondb): reg = cls() for site in jsondb: site_info = jsondb[site].copy() location = EarthLocation.from_geodetic(site_info.pop('longitude') * u.Unit(site_info.pop('longitude_unit')), site_info.pop('latitude') * u.Unit(site_info.pop('latitude_unit')), site_info.pop('elevation') * u.Unit(site_info.pop('elevation_unit'))) name = site_info.pop('name') location.info.name = name aliases = [alias for alias in site_info.pop('aliases') if alias] if name not in aliases and name != site: aliases.append(name) location.info.meta = site_info # whatever is left reg.add_site([site] + aliases, location) reg._loaded_jsondb = jsondb return reg def get_builtin_sites(): """ Load observatory database from data/observatories.json and parse them into a SiteRegistry. """ jsondb = json.loads(get_pkg_data_contents('data/sites.json')) return SiteRegistry.from_json(jsondb) def get_downloaded_sites(jsonurl=None): """ Load observatory database from data.astropy.org and parse into a SiteRegistry """ # we explicitly set the encoding because the default is to leave it set by # the users' locale, which may fail if it's not matched to the sites.json if jsonurl is None: content = get_pkg_data_contents('coordinates/sites.json', encoding='UTF-8') else: content = get_file_contents(jsonurl, encoding='UTF-8') jsondb = json.loads(content) return SiteRegistry.from_json(jsondb)
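# Editor's sketch (not part of the original module): building a registry by
# hand and looking a site up case-insensitively.  The Greenwich coordinates
# are approximate and purely illustrative.
if __name__ == '__main__':
    registry = SiteRegistry()
    greenwich = EarthLocation.from_geodetic(0.0 * u.deg, 51.477 * u.deg,
                                            46.0 * u.m)
    registry.add_site(['Royal Greenwich Observatory', 'greenwich'], greenwich)
    print(registry['GREENWICH'].geodetic)  # lookup ignores case
    print(registry.names)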
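# Editor's sketch (not part of the original module): the JSON layout that
# SiteRegistry.from_json expects, shown with a single made-up entry.  Any
# keys left over after parsing end up in ``location.info.meta``.
if __name__ == '__main__':
    jsondb = {
        'example_site': {
            'name': 'Example Observatory',
            'longitude': 10.0, 'longitude_unit': 'deg',
            'latitude': 45.0, 'latitude_unit': 'deg',
            'elevation': 1200.0, 'elevation_unit': 'm',
            'aliases': ['example'],
            'source': 'made up for this illustration',
        }
    }
    registry = SiteRegistry.from_json(jsondb)
    print(registry['Example Observatory'].info.meta)  # {'source': ...}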
# Licensed under a 3-clause BSD style license - see LICENSE.rst from warnings import warn import collections import socket import json import urllib.request import urllib.error import urllib.parse import numpy as np import erfa from astropy import units as u from astropy import constants as consts from astropy.units.quantity import QuantityInfoBase from astropy.utils import data from astropy.utils.decorators import format_doc from astropy.utils.exceptions import AstropyUserWarning from .angles import Angle, Longitude, Latitude from .representation import (BaseRepresentation, CartesianRepresentation, CartesianDifferential) from .matrix_utilities import matrix_transpose from .errors import UnknownSiteException __all__ = ['EarthLocation', 'BaseGeodeticRepresentation', 'WGS84GeodeticRepresentation', 'WGS72GeodeticRepresentation', 'GRS80GeodeticRepresentation'] GeodeticLocation = collections.namedtuple('GeodeticLocation', ['lon', 'lat', 'height']) ELLIPSOIDS = {} """Available ellipsoids (defined in erfam.h, with numbers exposed in erfa).""" # Note: they get filled by the creation of the geodetic classes. OMEGA_EARTH = ((1.002_737_811_911_354_48 * u.cycle/u.day) .to(1/u.s, u.dimensionless_angles())) """ Rotational velocity of Earth, following SOFA's pvtob. In UT1 seconds, this would be 2 pi / (24 * 3600), but we need the value in SI seconds, so multiply by the ratio of stellar to solar day. See Explanatory Supplement to the Astronomical Almanac, ed. P. Kenneth Seidelmann (1992), University Science Books. The constant is the conventional, exact one (IERS conventions 2003); see http://hpiers.obspm.fr/eop-pc/index.php?index=constants. """ def _check_ellipsoid(ellipsoid=None, default='WGS84'): if ellipsoid is None: ellipsoid = default if ellipsoid not in ELLIPSOIDS: raise ValueError(f'Ellipsoid {ellipsoid} not among known ones ({ELLIPSOIDS})') return ellipsoid def _get_json_result(url, err_str, use_google): # need to do this here to prevent a series of complicated circular imports from .name_resolve import NameResolveError try: # Retrieve JSON response from Google maps API resp = urllib.request.urlopen(url, timeout=data.conf.remote_timeout) resp_data = json.loads(resp.read().decode('utf8')) except urllib.error.URLError as e: # This catches a timeout error, see: # http://stackoverflow.com/questions/2712524/handling-urllib2s-timeout-python if isinstance(e.reason, socket.timeout): raise NameResolveError(err_str.format(msg="connection timed out")) from e else: raise NameResolveError(err_str.format(msg=e.reason)) from e except socket.timeout: # There are some cases where urllib2 does not catch socket.timeout # especially while receiving response data on an already previously # working request raise NameResolveError(err_str.format(msg="connection timed out")) if use_google: results = resp_data.get('results', []) if resp_data.get('status', None) != 'OK': raise NameResolveError(err_str.format(msg="unknown failure with " "Google API")) else: # OpenStreetMap returns a list results = resp_data if not results: raise NameResolveError(err_str.format(msg="no results returned")) return results class EarthLocationInfo(QuantityInfoBase): """ Container for meta information like name, description, format. This is required when the object is used as a mixin column within a table, but can be used as a general way to store meta information. """ _represent_as_dict_attrs = ('x', 'y', 'z', 'ellipsoid') def _construct_from_dict(self, map): # Need to pop ellipsoid off and update post-instantiation. 
This is # on the to-fix list in #4261. ellipsoid = map.pop('ellipsoid') out = self._parent_cls(**map) out.ellipsoid = ellipsoid return out def new_like(self, cols, length, metadata_conflicts='warn', name=None): """ Return a new EarthLocation instance which is consistent with the input ``cols`` and has ``length`` rows. This is intended for creating an empty column object whose elements can be set in-place for table operations like join or vstack. Parameters ---------- cols : list List of input columns length : int Length of the output column object metadata_conflicts : str ('warn'|'error'|'silent') How to handle metadata conflicts name : str Output column name Returns ------- col : EarthLocation (or subclass) Empty instance of this class consistent with ``cols`` """ # Very similar to QuantityInfo.new_like, but the creation of the # map is different enough that this needs its own rouinte. # Get merged info attributes shape, dtype, format, description. attrs = self.merge_cols_attributes(cols, metadata_conflicts, name, ('meta', 'format', 'description')) # The above raises an error if the dtypes do not match, but returns # just the string representation, which is not useful, so remove. attrs.pop('dtype') # Make empty EarthLocation using the dtype and unit of the last column. # Use zeros so we do not get problems for possible conversion to # geodetic coordinates. shape = (length,) + attrs.pop('shape') data = u.Quantity(np.zeros(shape=shape, dtype=cols[0].dtype), unit=cols[0].unit, copy=False) # Get arguments needed to reconstruct class map = {key: (data[key] if key in 'xyz' else getattr(cols[-1], key)) for key in self._represent_as_dict_attrs} out = self._construct_from_dict(map) # Set remaining info attributes for attr, value in attrs.items(): setattr(out.info, attr, value) return out class EarthLocation(u.Quantity): """ Location on the Earth. Initialization is first attempted assuming geocentric (x, y, z) coordinates are given; if that fails, another attempt is made assuming geodetic coordinates (longitude, latitude, height above a reference ellipsoid). When using the geodetic forms, Longitudes are measured increasing to the east, so west longitudes are negative. Internally, the coordinates are stored as geocentric. To ensure a specific type of coordinates is used, use the corresponding class methods (`from_geocentric` and `from_geodetic`) or initialize the arguments with names (``x``, ``y``, ``z`` for geocentric; ``lon``, ``lat``, ``height`` for geodetic). See the class methods for details. Notes ----- This class fits into the coordinates transformation framework in that it encodes a position on the `~astropy.coordinates.ITRS` frame. To get a proper `~astropy.coordinates.ITRS` object from this object, use the ``itrs`` property. """ _ellipsoid = 'WGS84' _location_dtype = np.dtype({'names': ['x', 'y', 'z'], 'formats': [np.float64]*3}) _array_dtype = np.dtype((np.float64, (3,))) info = EarthLocationInfo() def __new__(cls, *args, **kwargs): # TODO: needs copy argument and better dealing with inputs. 
if (len(args) == 1 and len(kwargs) == 0 and isinstance(args[0], EarthLocation)): return args[0].copy() try: self = cls.from_geocentric(*args, **kwargs) except (u.UnitsError, TypeError) as exc_geocentric: try: self = cls.from_geodetic(*args, **kwargs) except Exception as exc_geodetic: raise TypeError('Coordinates could not be parsed as either ' 'geocentric or geodetic, with respective ' 'exceptions "{}" and "{}"' .format(exc_geocentric, exc_geodetic)) return self @classmethod def from_geocentric(cls, x, y, z, unit=None): """ Location on Earth, initialized from geocentric coordinates. Parameters ---------- x, y, z : `~astropy.units.Quantity` or array-like Cartesian coordinates. If not quantities, ``unit`` should be given. unit : unit-like or None Physical unit of the coordinate values. If ``x``, ``y``, and/or ``z`` are quantities, they will be converted to this unit. Raises ------ astropy.units.UnitsError If the units on ``x``, ``y``, and ``z`` do not match or an invalid unit is given. ValueError If the shapes of ``x``, ``y``, and ``z`` do not match. TypeError If ``x`` is not a `~astropy.units.Quantity` and no unit is given. """ if unit is None: try: unit = x.unit except AttributeError: raise TypeError("Geocentric coordinates should be Quantities " "unless an explicit unit is given.") from None else: unit = u.Unit(unit) if unit.physical_type != 'length': raise u.UnitsError("Geocentric coordinates should be in " "units of length.") try: x = u.Quantity(x, unit, copy=False) y = u.Quantity(y, unit, copy=False) z = u.Quantity(z, unit, copy=False) except u.UnitsError: raise u.UnitsError("Geocentric coordinate units should all be " "consistent.") x, y, z = np.broadcast_arrays(x, y, z) struc = np.empty(x.shape, cls._location_dtype) struc['x'], struc['y'], struc['z'] = x, y, z return super().__new__(cls, struc, unit, copy=False) @classmethod def from_geodetic(cls, lon, lat, height=0., ellipsoid=None): """ Location on Earth, initialized from geodetic coordinates. Parameters ---------- lon : `~astropy.coordinates.Longitude` or float Earth East longitude. Can be anything that initialises an `~astropy.coordinates.Angle` object (if float, in degrees). lat : `~astropy.coordinates.Latitude` or float Earth latitude. Can be anything that initialises an `~astropy.coordinates.Latitude` object (if float, in degrees). height : `~astropy.units.Quantity` ['length'] or float, optional Height above reference ellipsoid (if float, in meters; default: 0). ellipsoid : str, optional Name of the reference ellipsoid to use (default: 'WGS84'). Available ellipsoids are: 'WGS84', 'GRS80', 'WGS72'. Raises ------ astropy.units.UnitsError If the units on ``lon`` and ``lat`` are inconsistent with angular ones, or that on ``height`` with a length. ValueError If ``lon``, ``lat``, and ``height`` do not have the same shape, or if ``ellipsoid`` is not recognized as among the ones implemented. Notes ----- For the conversion to geocentric coordinates, the ERFA routine ``gd2gc`` is used. See https://github.com/liberfa/erfa """ ellipsoid = _check_ellipsoid(ellipsoid, default=cls._ellipsoid) # As wrapping fails on readonly input, we do so manually lon = Angle(lon, u.degree, copy=False).wrap_at(180 * u.degree) lat = Latitude(lat, u.degree, copy=False) # don't convert to m by default, so we can use the height unit below. if not isinstance(height, u.Quantity): height = u.Quantity(height, u.m, copy=False) # get geocentric coordinates. 
geodetic = ELLIPSOIDS[ellipsoid](lon, lat, height, copy=False) xyz = geodetic.to_cartesian().get_xyz(xyz_axis=-1) << height.unit self = xyz.view(cls._location_dtype, cls).reshape(geodetic.shape) self._ellipsoid = ellipsoid return self @classmethod def of_site(cls, site_name): """ Return an object of this class for a known observatory/site by name. This is intended as a quick convenience function to get basic site information, not a fully-featured exhaustive registry of observatories and all their properties. Additional information about the site is stored in the ``.info.meta`` dictionary of sites obtained using this method (see the examples below). .. note:: When this function is called, it will attempt to download site information from the astropy data server. If you would like a site to be added, issue a pull request to the `astropy-data repository <https://github.com/astropy/astropy-data>`_ . If a site cannot be found in the registry (i.e., an internet connection is not available), it will fall back on a built-in list, In the future, this bundled list might include a version-controlled list of canonical observatories extracted from the online version, but it currently only contains the Greenwich Royal Observatory as an example case. Parameters ---------- site_name : str Name of the observatory (case-insensitive). Returns ------- site : `~astropy.coordinates.EarthLocation` (or subclass) instance The location of the observatory. The returned class will be the same as this class. Examples -------- >>> from astropy.coordinates import EarthLocation >>> keck = EarthLocation.of_site('Keck Observatory') # doctest: +REMOTE_DATA >>> keck.geodetic # doctest: +REMOTE_DATA +FLOAT_CMP GeodeticLocation(lon=<Longitude -155.47833333 deg>, lat=<Latitude 19.82833333 deg>, height=<Quantity 4160. m>) >>> keck.info # doctest: +REMOTE_DATA name = W. M. Keck Observatory dtype = (float64, float64, float64) unit = m class = EarthLocation n_bad = 0 >>> keck.info.meta # doctest: +REMOTE_DATA {'source': 'IRAF Observatory Database', 'timezone': 'US/Hawaii'} See Also -------- get_site_names : the list of sites that this function can access """ # noqa registry = cls._get_site_registry() try: el = registry[site_name] except UnknownSiteException as e: raise UnknownSiteException(e.site, 'EarthLocation.get_site_names', close_names=e.close_names) from e if cls is el.__class__: return el else: newel = cls.from_geodetic(*el.to_geodetic()) newel.info.name = el.info.name return newel @classmethod def of_address(cls, address, get_height=False, google_api_key=None): """ Return an object of this class for a given address by querying either the OpenStreetMap Nominatim tool [1]_ (default) or the Google geocoding API [2]_, which requires a specified API key. This is intended as a quick convenience function to get easy access to locations. If you need to specify a precise location, you should use the initializer directly and pass in a longitude, latitude, and elevation. In the background, this just issues a web query to either of the APIs noted above. This is not meant to be abused! Both OpenStreetMap and Google use IP-based query limiting and will ban your IP if you send more than a few thousand queries per hour [2]_. .. warning:: If the query returns more than one location (e.g., searching on ``address='springfield'``), this function will use the **first** returned location. Parameters ---------- address : str The address to get the location for. 
As per the Google maps API, this can be a fully specified street address (e.g., 123 Main St., New York, NY) or a city name (e.g., Danbury, CT), or etc. get_height : bool, optional This only works when using the Google API! See the ``google_api_key`` block below. Use the retrieved location to perform a second query to the Google maps elevation API to retrieve the height of the input address [3]_. google_api_key : str, optional A Google API key with the Geocoding API and (optionally) the elevation API enabled. See [4]_ for more information. Returns ------- location : `~astropy.coordinates.EarthLocation` (or subclass) instance The location of the input address. Will be type(this class) References ---------- .. [1] https://nominatim.openstreetmap.org/ .. [2] https://developers.google.com/maps/documentation/geocoding/start .. [3] https://developers.google.com/maps/documentation/elevation/start .. [4] https://developers.google.com/maps/documentation/geocoding/get-api-key """ use_google = google_api_key is not None # Fail fast if invalid options are passed: if not use_google and get_height: raise ValueError( 'Currently, `get_height` only works when using ' 'the Google geocoding API, which requires passing ' 'a Google API key with `google_api_key`. See: ' 'https://developers.google.com/maps/documentation/geocoding/get-api-key ' 'for information on obtaining an API key.') if use_google: # Google pars = urllib.parse.urlencode({'address': address, 'key': google_api_key}) geo_url = f"https://maps.googleapis.com/maps/api/geocode/json?{pars}" else: # OpenStreetMap pars = urllib.parse.urlencode({'q': address, 'format': 'json'}) geo_url = f"https://nominatim.openstreetmap.org/search?{pars}" # get longitude and latitude location err_str = f"Unable to retrieve coordinates for address '{address}'; {{msg}}" geo_result = _get_json_result(geo_url, err_str=err_str, use_google=use_google) if use_google: loc = geo_result[0]['geometry']['location'] lat = loc['lat'] lon = loc['lng'] else: loc = geo_result[0] lat = float(loc['lat']) # strings are returned by OpenStreetMap lon = float(loc['lon']) if get_height: pars = {'locations': f'{lat:.8f},{lon:.8f}', 'key': google_api_key} pars = urllib.parse.urlencode(pars) ele_url = f"https://maps.googleapis.com/maps/api/elevation/json?{pars}" err_str = f"Unable to retrieve elevation for address '{address}'; {{msg}}" ele_result = _get_json_result(ele_url, err_str=err_str, use_google=use_google) height = ele_result[0]['elevation']*u.meter else: height = 0. return cls.from_geodetic(lon=lon*u.deg, lat=lat*u.deg, height=height) @classmethod def get_site_names(cls): """ Get list of names of observatories for use with `~astropy.coordinates.EarthLocation.of_site`. .. note:: When this function is called, it will first attempt to download site information from the astropy data server. If it cannot (i.e., an internet connection is not available), it will fall back on the list included with astropy (which is a limited and dated set of sites). If you think a site should be added, issue a pull request to the `astropy-data repository <https://github.com/astropy/astropy-data>`_ . Returns ------- names : list of str List of valid observatory names See Also -------- of_site : Gets the actual location object for one of the sites names this returns. """ return cls._get_site_registry().names @classmethod def _get_site_registry(cls, force_download=False, force_builtin=False): """ Gets the site registry. The first time this either downloads or loads from the data file packaged with astropy. 
Subsequent calls will use the cached version unless explicitly overridden. Parameters ---------- force_download : bool or str If not False, force replacement of the cached registry with a downloaded version. If a str, that will be used as the URL to download from (if just True, the default URL will be used). force_builtin : bool If True, load from the data file bundled with astropy and set the cache to that. Returns ------- reg : astropy.coordinates.sites.SiteRegistry """ # need to do this here at the bottom to avoid circular dependencies from .sites import get_builtin_sites, get_downloaded_sites if force_builtin and force_download: raise ValueError('Cannot have both force_builtin and force_download True') if force_builtin: reg = cls._site_registry = get_builtin_sites() else: reg = getattr(cls, '_site_registry', None) if force_download or not reg: try: if isinstance(force_download, str): reg = get_downloaded_sites(force_download) else: reg = get_downloaded_sites() except OSError: if force_download: raise msg = ('Could not access the online site list. Falling ' 'back on the built-in version, which is rather ' 'limited. If you want to retry the download, do ' '{0}._get_site_registry(force_download=True)') warn(AstropyUserWarning(msg.format(cls.__name__))) reg = get_builtin_sites() cls._site_registry = reg return reg @property def ellipsoid(self): """The default ellipsoid used to convert to geodetic coordinates.""" return self._ellipsoid @ellipsoid.setter def ellipsoid(self, ellipsoid): self._ellipsoid = _check_ellipsoid(ellipsoid) @property def geodetic(self): """Convert to geodetic coordinates for the default ellipsoid.""" return self.to_geodetic() def to_geodetic(self, ellipsoid=None): """Convert to geodetic coordinates. Parameters ---------- ellipsoid : str, optional Reference ellipsoid to use. Default is the one the coordinates were initialized with. Available are: 'WGS84', 'GRS80', 'WGS72' Returns ------- lon, lat, height : `~astropy.units.Quantity` The tuple is a ``GeodeticLocation`` namedtuple and is comprised of instances of `~astropy.coordinates.Longitude`, `~astropy.coordinates.Latitude`, and `~astropy.units.Quantity`. Raises ------ ValueError if ``ellipsoid`` is not recognized as among the ones implemented. Notes ----- For the conversion to geodetic coordinates, the ERFA routine ``gc2gd`` is used. See https://github.com/liberfa/erfa """ ellipsoid = _check_ellipsoid(ellipsoid, default=self.ellipsoid) xyz = self.view(self._array_dtype, u.Quantity) llh = CartesianRepresentation(xyz, xyz_axis=-1, copy=False).represent_as( ELLIPSOIDS[ellipsoid]) return GeodeticLocation( Longitude(llh.lon, u.deg, wrap_angle=180*u.deg, copy=False), llh.lat << u.deg, llh.height << self.unit) @property def lon(self): """Longitude of the location, for the default ellipsoid.""" return self.geodetic[0] @property def lat(self): """Latitude of the location, for the default ellipsoid.""" return self.geodetic[1] @property def height(self): """Height of the location, for the default ellipsoid.""" return self.geodetic[2] # mostly for symmetry with geodetic and to_geodetic. @property def geocentric(self): """Convert to a tuple with X, Y, and Z as quantities""" return self.to_geocentric() def to_geocentric(self): """Convert to a tuple with X, Y, and Z as quantities""" return (self.x, self.y, self.z) def get_itrs(self, obstime=None): """ Generates an `~astropy.coordinates.ITRS` object with the location of this object at the requested ``obstime``. 
Parameters ---------- obstime : `~astropy.time.Time` or None The ``obstime`` to apply to the new `~astropy.coordinates.ITRS`, or if None, the default ``obstime`` will be used. Returns ------- itrs : `~astropy.coordinates.ITRS` The new object in the ITRS frame """ # Broadcast for a single position at multiple times, but don't attempt # to be more general here. if obstime and self.size == 1 and obstime.shape: self = np.broadcast_to(self, obstime.shape, subok=True) # do this here to prevent a series of complicated circular imports from .builtin_frames import ITRS return ITRS(x=self.x, y=self.y, z=self.z, obstime=obstime) itrs = property(get_itrs, doc="""An `~astropy.coordinates.ITRS` object with for the location of this object at the default ``obstime``.""") def get_gcrs(self, obstime): """GCRS position with velocity at ``obstime`` as a GCRS coordinate. Parameters ---------- obstime : `~astropy.time.Time` The ``obstime`` to calculate the GCRS position/velocity at. Returns ------- gcrs : `~astropy.coordinates.GCRS` instance With velocity included. """ # do this here to prevent a series of complicated circular imports from .builtin_frames import GCRS loc, vel = self.get_gcrs_posvel(obstime) loc.differentials['s'] = CartesianDifferential.from_cartesian(vel) return GCRS(loc, obstime=obstime) def _get_gcrs_posvel(self, obstime, ref_to_itrs, gcrs_to_ref): """Calculate GCRS position and velocity given transformation matrices. The reference frame z axis must point to the Celestial Intermediate Pole (as is the case for CIRS and TETE). This private method is used in intermediate_rotation_transforms, where some of the matrices are already available for the coordinate transformation. The method is faster by an order of magnitude than just adding a zero velocity to ITRS and transforming to GCRS, because it avoids calculating the velocity via finite differencing of the results of the transformation at three separate times. """ # The simplest route is to transform to the reference frame where the # z axis is properly aligned with the Earth's rotation axis (CIRS or # TETE), then calculate the velocity, and then transform this # reference position and velocity to GCRS. For speed, though, we # transform the coordinates to GCRS in one step, and calculate the # velocities by rotating around the earth's axis transformed to GCRS. ref_to_gcrs = matrix_transpose(gcrs_to_ref) itrs_to_gcrs = ref_to_gcrs @ matrix_transpose(ref_to_itrs) # Earth's rotation vector in the ref frame is rot_vec_ref = (0,0,OMEGA_EARTH), # so in GCRS it is rot_vec_gcrs[..., 2] @ OMEGA_EARTH. rot_vec_gcrs = CartesianRepresentation(ref_to_gcrs[..., 2] * OMEGA_EARTH, xyz_axis=-1, copy=False) # Get the position in the GCRS frame. # Since we just need the cartesian representation of ITRS, avoid get_itrs(). itrs_cart = CartesianRepresentation(self.x, self.y, self.z, copy=False) pos = itrs_cart.transform(itrs_to_gcrs) vel = rot_vec_gcrs.cross(pos) return pos, vel def get_gcrs_posvel(self, obstime): """ Calculate the GCRS position and velocity of this object at the requested ``obstime``. Parameters ---------- obstime : `~astropy.time.Time` The ``obstime`` to calculate the GCRS position/velocity at. Returns ------- obsgeoloc : `~astropy.coordinates.CartesianRepresentation` The GCRS position of the object obsgeovel : `~astropy.coordinates.CartesianRepresentation` The GCRS velocity of the object """ # Local import to prevent circular imports. 
from .builtin_frames.intermediate_rotation_transforms import ( cirs_to_itrs_mat, gcrs_to_cirs_mat) # Get gcrs_posvel by transforming via CIRS (slightly faster than TETE). return self._get_gcrs_posvel(obstime, cirs_to_itrs_mat(obstime), gcrs_to_cirs_mat(obstime)) def gravitational_redshift(self, obstime, bodies=['sun', 'jupiter', 'moon'], masses={}): """Return the gravitational redshift at this EarthLocation. Calculates the gravitational redshift, of order 3 m/s, due to the requested solar system bodies. Parameters ---------- obstime : `~astropy.time.Time` The ``obstime`` to calculate the redshift at. bodies : iterable, optional The bodies (other than the Earth) to include in the redshift calculation. List elements should be any body name `get_body_barycentric` accepts. Defaults to Jupiter, the Sun, and the Moon. Earth is always included (because the class represents an *Earth* location). masses : dict[str, `~astropy.units.Quantity`], optional The mass or gravitational parameters (G * mass) to assume for the bodies requested in ``bodies``. Can be used to override the defaults for the Sun, Jupiter, the Moon, and the Earth, or to pass in masses for other bodies. Returns ------- redshift : `~astropy.units.Quantity` Gravitational redshift in velocity units at given obstime. """ # needs to be here to avoid circular imports from .solar_system import get_body_barycentric bodies = list(bodies) # Ensure earth is included and last in the list. if 'earth' in bodies: bodies.remove('earth') bodies.append('earth') _masses = {'sun': consts.GM_sun, 'jupiter': consts.GM_jup, 'moon': consts.G * 7.34767309e22*u.kg, 'earth': consts.GM_earth} _masses.update(masses) GMs = [] M_GM_equivalency = (u.kg, u.Unit(consts.G * u.kg)) for body in bodies: try: GMs.append(_masses[body].to(u.m**3/u.s**2, [M_GM_equivalency])) except KeyError as err: raise KeyError(f'body "{body}" does not have a mass.') from err except u.UnitsError as exc: exc.args += ('"masses" argument values must be masses or ' 'gravitational parameters.',) raise positions = [get_body_barycentric(name, obstime) for name in bodies] # Calculate distances to objects other than earth. distances = [(pos - positions[-1]).norm() for pos in positions[:-1]] # Append distance from Earth's center for Earth's contribution. distances.append(CartesianRepresentation(self.geocentric).norm()) # Get redshifts due to all objects. redshifts = [-GM / consts.c / distance for (GM, distance) in zip(GMs, distances)] # Reverse order of summing, to go from small to big, and to get # "earth" first, which gives m/s as unit. return sum(redshifts[::-1]) @property def x(self): """The X component of the geocentric coordinates.""" return self['x'] @property def y(self): """The Y component of the geocentric coordinates.""" return self['y'] @property def z(self): """The Z component of the geocentric coordinates.""" return self['z'] def __getitem__(self, item): result = super().__getitem__(item) if result.dtype is self.dtype: return result.view(self.__class__) else: return result.view(u.Quantity) def __array_finalize__(self, obj): super().__array_finalize__(obj) if hasattr(obj, '_ellipsoid'): self._ellipsoid = obj._ellipsoid def __len__(self): if self.shape == (): raise IndexError('0-d EarthLocation arrays cannot be indexed') else: return super().__len__() def _to_value(self, unit, equivalencies=[]): """Helper method for to and to_value.""" # Conversion to another unit in both ``to`` and ``to_value`` goes # via this routine. 
        # To make the regular quantity routines work, we temporarily turn the
        # structured array into a regular one.
        array_view = self.view(self._array_dtype, np.ndarray)
        if equivalencies == []:
            equivalencies = self._equivalencies
        new_array = self.unit.to(unit, array_view, equivalencies=equivalencies)
        return new_array.view(self.dtype).reshape(self.shape)


geodetic_base_doc = """{__doc__}

    Parameters
    ----------
    lon, lat : angle-like
        The longitude and latitude of the point(s), in angular units. The
        latitude should be between -90 and 90 degrees, and the longitude will
        be wrapped to an angle between 0 and 360 degrees. These can also be
        instances of `~astropy.coordinates.Angle` and either
        `~astropy.coordinates.Longitude` or `~astropy.coordinates.Latitude`,
        depending on the parameter.
    height : `~astropy.units.Quantity` ['length']
        The height of the point(s).
    copy : bool, optional
        If `True` (default), arrays will be copied. If `False`, arrays will
        be references, though possibly broadcast to ensure matching shapes.
"""


@format_doc(geodetic_base_doc)
class BaseGeodeticRepresentation(BaseRepresentation):
    """Base geodetic representation."""

    attr_classes = {'lon': Longitude,
                    'lat': Latitude,
                    'height': u.Quantity}

    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        if '_ellipsoid' in cls.__dict__:
            ELLIPSOIDS[cls._ellipsoid] = cls

    def __init__(self, lon, lat=None, height=None, copy=True):
        if height is None and not isinstance(lon, self.__class__):
            height = 0 << u.m

        super().__init__(lon, lat, height, copy=copy)
        if not self.height.unit.is_equivalent(u.m):
            raise u.UnitTypeError(f"{self.__class__.__name__} requires "
                                  f"height with units of length.")

    def to_cartesian(self):
        """
        Converts WGS84 geodetic coordinates to 3D rectangular (geocentric)
        cartesian coordinates.
        """
        xyz = erfa.gd2gc(getattr(erfa, self._ellipsoid),
                         self.lon, self.lat, self.height)
        return CartesianRepresentation(xyz, xyz_axis=-1, copy=False)

    @classmethod
    def from_cartesian(cls, cart):
        """
        Converts 3D rectangular cartesian coordinates (assumed geocentric) to
        WGS84 geodetic coordinates.
        """
        lon, lat, height = erfa.gc2gd(getattr(erfa, cls._ellipsoid),
                                      cart.get_xyz(xyz_axis=-1))
        return cls(lon, lat, height, copy=False)


@format_doc(geodetic_base_doc)
class WGS84GeodeticRepresentation(BaseGeodeticRepresentation):
    """Representation of points in WGS84 3D geodetic coordinates."""

    _ellipsoid = 'WGS84'


@format_doc(geodetic_base_doc)
class WGS72GeodeticRepresentation(BaseGeodeticRepresentation):
    """Representation of points in WGS72 3D geodetic coordinates."""

    _ellipsoid = 'WGS72'


@format_doc(geodetic_base_doc)
class GRS80GeodeticRepresentation(BaseGeodeticRepresentation):
    """Representation of points in GRS80 3D geodetic coordinates."""

    _ellipsoid = 'GRS80'
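
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above).  It assumes the
# geodetic classes are importable from `astropy.coordinates.earth` and that
# `erfa` is installed; the function name `_demo_geodetic_roundtrip` is
# hypothetical.
def _demo_geodetic_roundtrip():
    import astropy.units as u
    from astropy.coordinates.earth import WGS84GeodeticRepresentation

    # Geodetic -> geocentric cartesian -> geodetic should round-trip to
    # numerical precision, since to_cartesian/from_cartesian wrap
    # erfa.gd2gc/erfa.gc2gd on the same ellipsoid.
    geo = WGS84GeodeticRepresentation(lon=30 * u.deg, lat=45 * u.deg,
                                      height=100 * u.m)
    cart = geo.to_cartesian()
    back = WGS84GeodeticRepresentation.from_cartesian(cart)
    print(back.lon.deg, back.lat.deg, back.height)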
87e9c692c397784bf8b84a051a7760afe444e34d631e226f23e83b573dd83c15
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains the fundamental classes used for representing coordinates in astropy. """ import warnings from collections import namedtuple import numpy as np from . import angle_formats as form from astropy import units as u from astropy.utils import isiterable __all__ = ['Angle', 'Latitude', 'Longitude'] # these are used by the `hms` and `dms` attributes hms_tuple = namedtuple('hms_tuple', ('h', 'm', 's')) dms_tuple = namedtuple('dms_tuple', ('d', 'm', 's')) signed_dms_tuple = namedtuple('signed_dms_tuple', ('sign', 'd', 'm', 's')) class Angle(u.SpecificTypeQuantity): """ One or more angular value(s) with units equivalent to radians or degrees. An angle can be specified either as an array, scalar, tuple (see below), string, `~astropy.units.Quantity` or another :class:`~astropy.coordinates.Angle`. The input parser is flexible and supports a variety of formats. The examples below illustrate common ways of initializing an `Angle` object. First some imports:: >>> from astropy.coordinates import Angle >>> from astropy import units as u The angle values can now be provided:: >>> Angle('10.2345d') <Angle 10.2345 deg> >>> Angle(['10.2345d', '-20d']) <Angle [ 10.2345, -20. ] deg> >>> Angle('1:2:30.43 degrees') <Angle 1.04178611 deg> >>> Angle('1 2 0 hours') <Angle 1.03333333 hourangle> >>> Angle(np.arange(1, 8), unit=u.deg) <Angle [1., 2., 3., 4., 5., 6., 7.] deg> >>> Angle('1°2′3″') <Angle 1.03416667 deg> >>> Angle('1°2′3″N') <Angle 1.03416667 deg> >>> Angle('1d2m3.4s') <Angle 1.03427778 deg> >>> Angle('1d2m3.4sS') <Angle -1.03427778 deg> >>> Angle('-1h2m3s') <Angle -1.03416667 hourangle> >>> Angle('-1h2m3sE') <Angle -1.03416667 hourangle> >>> Angle('-1h2.5m') <Angle -1.04166667 hourangle> >>> Angle('-1h2.5mW') <Angle 1.04166667 hourangle> >>> Angle('-1:2.5', unit=u.deg) <Angle -1.04166667 deg> >>> Angle(10.2345 * u.deg) <Angle 10.2345 deg> >>> Angle(Angle(10.2345 * u.deg)) <Angle 10.2345 deg> Parameters ---------- angle : `~numpy.array`, scalar, `~astropy.units.Quantity`, :class:`~astropy.coordinates.Angle` The angle value. If a tuple, will be interpreted as ``(h, m, s)`` or ``(d, m, s)`` depending on ``unit``. If a string, it will be interpreted following the rules described above. If ``angle`` is a sequence or array of strings, the resulting values will be in the given ``unit``, or if `None` is provided, the unit will be taken from the first given value. unit : unit-like, optional The unit of the value specified for the angle. This may be any string that `~astropy.units.Unit` understands, but it is better to give an actual unit object. Must be an angular unit. dtype : `~numpy.dtype`, optional See `~astropy.units.Quantity`. copy : bool, optional See `~astropy.units.Quantity`. Raises ------ `~astropy.units.UnitsError` If a unit is not provided or it is not an angular unit. """ _equivalent_unit = u.radian _include_easy_conversion_members = True def __new__(cls, angle, unit=None, dtype=None, copy=True, **kwargs): if not isinstance(angle, u.Quantity): if unit is not None: unit = cls._convert_unit_to_angle_unit(u.Unit(unit)) if isinstance(angle, tuple): angle = cls._tuple_to_float(angle, unit) elif isinstance(angle, str): angle, angle_unit = form.parse_angle(angle, unit) if angle_unit is None: angle_unit = unit if isinstance(angle, tuple): if angle_unit == u.hourangle: form._check_hour_range(angle[0]) form._check_minute_range(angle[1]) a = np.abs(angle[0]) + angle[1] / 60. 
if len(angle) == 3: form._check_second_range(angle[2]) a += angle[2] / 3600. angle = np.copysign(a, angle[0]) if angle_unit is not unit: # Possible conversion to `unit` will be done below. angle = u.Quantity(angle, angle_unit, copy=False) elif (isiterable(angle) and not (isinstance(angle, np.ndarray) and angle.dtype.kind not in 'SUVO')): angle = [Angle(x, unit, copy=False) for x in angle] return super().__new__(cls, angle, unit, dtype=dtype, copy=copy, **kwargs) @staticmethod def _tuple_to_float(angle, unit): """ Converts an angle represented as a 3-tuple or 2-tuple into a floating point number in the given unit. """ # TODO: Numpy array of tuples? if unit == u.hourangle: return form.hms_to_hours(*angle) elif unit == u.degree: return form.dms_to_degrees(*angle) else: raise u.UnitsError(f"Can not parse '{angle}' as unit '{unit}'") @staticmethod def _convert_unit_to_angle_unit(unit): return u.hourangle if unit is u.hour else unit def _set_unit(self, unit): super()._set_unit(self._convert_unit_to_angle_unit(unit)) @property def hour(self): """ The angle's value in hours (read-only property). """ return self.hourangle @property def hms(self): """ The angle's value in hours, as a named tuple with ``(h, m, s)`` members. (This is a read-only property.) """ return hms_tuple(*form.hours_to_hms(self.hourangle)) @property def dms(self): """ The angle's value in degrees, as a named tuple with ``(d, m, s)`` members. (This is a read-only property.) """ return dms_tuple(*form.degrees_to_dms(self.degree)) @property def signed_dms(self): """ The angle's value in degrees, as a named tuple with ``(sign, d, m, s)`` members. The ``d``, ``m``, ``s`` are thus always positive, and the sign of the angle is given by ``sign``. (This is a read-only property.) This is primarily intended for use with `dms` to generate string representations of coordinates that are correct for negative angles. """ return signed_dms_tuple(np.sign(self.degree), *form.degrees_to_dms(np.abs(self.degree))) def to_string(self, unit=None, decimal=False, sep='fromunit', precision=None, alwayssign=False, pad=False, fields=3, format=None): """ A string representation of the angle. Parameters ---------- unit : `~astropy.units.UnitBase`, optional Specifies the unit. Must be an angular unit. If not provided, the unit used to initialize the angle will be used. decimal : bool, optional If `True`, a decimal representation will be used, otherwise the returned string will be in sexagesimal form. sep : str, optional The separator between numbers in a sexagesimal representation. E.g., if it is ':', the result is ``'12:41:11.1241'``. Also accepts 2 or 3 separators. E.g., ``sep='hms'`` would give the result ``'12h41m11.1241s'``, or sep='-:' would yield ``'11-21:17.124'``. Alternatively, the special string 'fromunit' means 'dms' if the unit is degrees, or 'hms' if the unit is hours. precision : int, optional The level of decimal precision. If ``decimal`` is `True`, this is the raw precision, otherwise it gives the precision of the last place of the sexagesimal representation (seconds). If `None`, or not provided, the number of decimal places is determined by the value, and will be between 0-8 decimal places as required. alwayssign : bool, optional If `True`, include the sign no matter what. If `False`, only include the sign if it is negative. pad : bool, optional If `True`, include leading zeros when needed to ensure a fixed number of characters for sexagesimal representation. 
        fields : int, optional
            Specifies the number of fields to display when outputting
            sexagesimal notation.  For example:

            - fields == 1: ``'5d'``
            - fields == 2: ``'5d45m'``
            - fields == 3: ``'5d45m32.5s'``

            By default, all fields are displayed.
        format : str, optional
            The format of the result.  If not provided, an unadorned
            string is returned.  Supported values are:

            - 'latex': Return a LaTeX-formatted string
            - 'latex_inline': Return a LaTeX-formatted string which is the
              same as with ``format='latex'`` for |Angle| instances
            - 'unicode': Return a string containing non-ASCII unicode
              characters, such as the degree symbol

        Returns
        -------
        strrepr : str or array
            A string representation of the angle. If the angle is an array,
            this will be an array with a unicode dtype.
        """
        if unit is None:
            unit = self.unit
        else:
            unit = self._convert_unit_to_angle_unit(u.Unit(unit))

        separators = {
            None: {
                u.degree: 'dms',
                u.hourangle: 'hms'},
            'latex': {
                u.degree: [r'^\circ', r'{}^\prime', r'{}^{\prime\prime}'],
                u.hourangle: [r'^{\mathrm{h}}', r'^{\mathrm{m}}', r'^{\mathrm{s}}']},
            'unicode': {
                u.degree: '°′″',
                u.hourangle: 'ʰᵐˢ'}
        }
        # 'latex_inline' provides no functionality beyond what 'latex' offers,
        # but it should be implemented to avoid ValueErrors in user code.
        separators['latex_inline'] = separators['latex']

        if sep == 'fromunit':
            if format not in separators:
                raise ValueError(f"Unknown format '{format}'")
            seps = separators[format]
            if unit in seps:
                sep = seps[unit]

        # Create an iterator so we can format each element of what
        # might be an array.
        if unit is u.degree:
            if decimal:
                values = self.degree
                if precision is not None:
                    func = ("{0:0." + str(precision) + "f}").format
                else:
                    func = '{:g}'.format
            else:
                if sep == 'fromunit':
                    sep = 'dms'
                values = self.degree
                func = lambda x: form.degrees_to_string(
                    x, precision=precision, sep=sep, pad=pad, fields=fields)

        elif unit is u.hourangle:
            if decimal:
                values = self.hour
                if precision is not None:
                    func = ("{0:0." + str(precision) + "f}").format
                else:
                    func = '{:g}'.format
            else:
                if sep == 'fromunit':
                    sep = 'hms'
                values = self.hour
                func = lambda x: form.hours_to_string(
                    x, precision=precision, sep=sep, pad=pad, fields=fields)

        elif unit.is_equivalent(u.radian):
            if decimal:
                values = self.to_value(unit)
                if precision is not None:
                    func = ("{0:1." + str(precision) + "f}").format
                else:
                    func = "{:g}".format
            elif sep == 'fromunit':
                values = self.to_value(unit)
                unit_string = unit.to_string(format=format)
                if format == 'latex' or format == 'latex_inline':
                    unit_string = unit_string[1:-1]

                if precision is not None:
                    def plain_unit_format(val):
                        return ("{0:0." + str(precision) + "f}{1}").format(
                            val, unit_string)
                    func = plain_unit_format
                else:
                    def plain_unit_format(val):
                        return f"{val:g}{unit_string}"
                    func = plain_unit_format
            else:
                raise ValueError(
                    f"'{unit.name}' can not be represented in sexagesimal notation")

        else:
            raise u.UnitsError(
                "The unit value provided is not an angular unit.")

        def do_format(val):
            # Check if value is not nan to avoid ValueErrors when turning it
            # into a sexagesimal string.
            if not np.isnan(val):
                s = func(float(val))
                if alwayssign and not s.startswith('-'):
                    s = '+' + s
                if format == 'latex' or format == 'latex_inline':
                    s = f'${s}$'
                return s
            s = f"{val}"
            return s

        format_ufunc = np.vectorize(do_format, otypes=['U'])
        result = format_ufunc(values)

        if result.ndim == 0:
            result = result[()]
        return result

    def _wrap_at(self, wrap_angle):
        """
        Implementation that assumes ``angle`` is already validated and
        that wrapping is inplace.
""" # Convert the wrap angle and 360 degrees to the native unit of # this Angle, then do all the math on raw Numpy arrays rather # than Quantity objects for speed. a360 = u.degree.to(self.unit, 360.0) wrap_angle = wrap_angle.to_value(self.unit) wrap_angle_floor = wrap_angle - a360 self_angle = self.view(np.ndarray) # Do the wrapping, but only if any angles need to be wrapped # # This invalid catch block is needed both for the floor division # and for the comparisons later on (latter not really needed # any more for >= 1.19 (NUMPY_LT_1_19), but former is). with np.errstate(invalid='ignore'): wraps = (self_angle - wrap_angle_floor) // a360 np.nan_to_num(wraps, copy=False) if np.any(wraps != 0): self_angle -= wraps*a360 # Rounding errors can cause problems. self_angle[self_angle >= wrap_angle] -= a360 self_angle[self_angle < wrap_angle_floor] += a360 def wrap_at(self, wrap_angle, inplace=False): """ Wrap the `~astropy.coordinates.Angle` object at the given ``wrap_angle``. This method forces all the angle values to be within a contiguous 360 degree range so that ``wrap_angle - 360d <= angle < wrap_angle``. By default a new Angle object is returned, but if the ``inplace`` argument is `True` then the `~astropy.coordinates.Angle` object is wrapped in place and nothing is returned. For instance:: >>> from astropy.coordinates import Angle >>> import astropy.units as u >>> a = Angle([-20.0, 150.0, 350.0] * u.deg) >>> a.wrap_at(360 * u.deg).degree # Wrap into range 0 to 360 degrees # doctest: +FLOAT_CMP array([340., 150., 350.]) >>> a.wrap_at('180d', inplace=True) # Wrap into range -180 to 180 degrees # doctest: +FLOAT_CMP >>> a.degree # doctest: +FLOAT_CMP array([-20., 150., -10.]) Parameters ---------- wrap_angle : angle-like Specifies a single value for the wrap angle. This can be any object that can initialize an `~astropy.coordinates.Angle` object, e.g. ``'180d'``, ``180 * u.deg``, or ``Angle(180, unit=u.deg)``. inplace : bool If `True` then wrap the object in place instead of returning a new `~astropy.coordinates.Angle` Returns ------- out : Angle or None If ``inplace is False`` (default), return new `~astropy.coordinates.Angle` object with angles wrapped accordingly. Otherwise wrap in place and return `None`. """ wrap_angle = Angle(wrap_angle, copy=False) # Convert to an Angle if not inplace: self = self.copy() self._wrap_at(wrap_angle) return None if inplace else self def is_within_bounds(self, lower=None, upper=None): """ Check if all angle(s) satisfy ``lower <= angle < upper`` If ``lower`` is not specified (or `None`) then no lower bounds check is performed. Likewise ``upper`` can be left unspecified. For example:: >>> from astropy.coordinates import Angle >>> import astropy.units as u >>> a = Angle([-20, 150, 350] * u.deg) >>> a.is_within_bounds('0d', '360d') False >>> a.is_within_bounds(None, '360d') True >>> a.is_within_bounds(-30 * u.deg, None) True Parameters ---------- lower : angle-like or None Specifies lower bound for checking. This can be any object that can initialize an `~astropy.coordinates.Angle` object, e.g. ``'180d'``, ``180 * u.deg``, or ``Angle(180, unit=u.deg)``. upper : angle-like or None Specifies upper bound for checking. This can be any object that can initialize an `~astropy.coordinates.Angle` object, e.g. ``'180d'``, ``180 * u.deg``, or ``Angle(180, unit=u.deg)``. 
Returns ------- is_within_bounds : bool `True` if all angles satisfy ``lower <= angle < upper`` """ ok = True if lower is not None: ok &= np.all(Angle(lower) <= self) if ok and upper is not None: ok &= np.all(self < Angle(upper)) return bool(ok) def _str_helper(self, format=None): if self.isscalar: return self.to_string(format=format) def formatter(x): return x.to_string(format=format) return np.array2string(self, formatter={'all': formatter}) def __str__(self): return self._str_helper() def _repr_latex_(self): return self._str_helper(format='latex') def _no_angle_subclass(obj): """Return any Angle subclass objects as an Angle objects. This is used to ensure that Latitude and Longitude change to Angle objects when they are used in calculations (such as lon/2.) """ if isinstance(obj, tuple): return tuple(_no_angle_subclass(_obj) for _obj in obj) return obj.view(Angle) if isinstance(obj, (Latitude, Longitude)) else obj class Latitude(Angle): """ Latitude-like angle(s) which must be in the range -90 to +90 deg. A Latitude object is distinguished from a pure :class:`~astropy.coordinates.Angle` by virtue of being constrained so that:: -90.0 * u.deg <= angle(s) <= +90.0 * u.deg Any attempt to set a value outside that range will result in a `ValueError`. The input angle(s) can be specified either as an array, list, scalar, tuple (see below), string, :class:`~astropy.units.Quantity` or another :class:`~astropy.coordinates.Angle`. The input parser is flexible and supports all of the input formats supported by :class:`~astropy.coordinates.Angle`. Parameters ---------- angle : array, list, scalar, `~astropy.units.Quantity`, `~astropy.coordinates.Angle` The angle value(s). If a tuple, will be interpreted as ``(h, m, s)`` or ``(d, m, s)`` depending on ``unit``. If a string, it will be interpreted following the rules described for :class:`~astropy.coordinates.Angle`. If ``angle`` is a sequence or array of strings, the resulting values will be in the given ``unit``, or if `None` is provided, the unit will be taken from the first given value. unit : unit-like, optional The unit of the value specified for the angle. This may be any string that `~astropy.units.Unit` understands, but it is better to give an actual unit object. Must be an angular unit. Raises ------ `~astropy.units.UnitsError` If a unit is not provided or it is not an angular unit. `TypeError` If the angle parameter is an instance of :class:`~astropy.coordinates.Longitude`. """ def __new__(cls, angle, unit=None, **kwargs): # Forbid creating a Lat from a Long. if isinstance(angle, Longitude): raise TypeError("A Latitude angle cannot be created from a Longitude angle") self = super().__new__(cls, angle, unit=unit, **kwargs) self._validate_angles() return self def _validate_angles(self, angles=None): """Check that angles are between -90 and 90 degrees. If not given, the check is done on the object itself""" # Convert the lower and upper bounds to the "native" unit of # this angle. This limits multiplication to two values, # rather than the N values in `self.value`. Also, the # comparison is performed on raw arrays, rather than Quantity # objects, for speed. 
        if angles is None:
            angles = self

        lower = u.degree.to(angles.unit, -90.0)
        upper = u.degree.to(angles.unit, 90.0)
        # This invalid catch block can be removed when the minimum numpy
        # version is >= 1.19 (NUMPY_LT_1_19)
        with np.errstate(invalid='ignore'):
            invalid_angles = (np.any(angles.value < lower) or
                              np.any(angles.value > upper))
        if invalid_angles:
            raise ValueError('Latitude angle(s) must be within -90 deg <= angle <= 90 deg, '
                             'got {}'.format(angles.to(u.degree)))

    def __setitem__(self, item, value):
        # Forbid assigning a Long to a Lat.
        if isinstance(value, Longitude):
            raise TypeError("A Longitude angle cannot be assigned to a Latitude angle")
        # first check bounds
        if value is not np.ma.masked:
            self._validate_angles(value)
        super().__setitem__(item, value)

    # Any calculation should drop to Angle
    def __array_ufunc__(self, *args, **kwargs):
        results = super().__array_ufunc__(*args, **kwargs)
        return _no_angle_subclass(results)


class LongitudeInfo(u.QuantityInfo):
    _represent_as_dict_attrs = u.QuantityInfo._represent_as_dict_attrs + ('wrap_angle',)


class Longitude(Angle):
    """
    Longitude-like angle(s) which are wrapped within a contiguous 360 degree range.

    A ``Longitude`` object is distinguished from a pure
    :class:`~astropy.coordinates.Angle` by virtue of a ``wrap_angle``
    property.  The ``wrap_angle`` specifies that all angle values
    represented by the object will be in the range::

      wrap_angle - 360 * u.deg <= angle(s) < wrap_angle

    The default ``wrap_angle`` is 360 deg.  Setting ``wrap_angle=180 *
    u.deg`` would instead result in values between -180 and +180 deg.
    Setting the ``wrap_angle`` attribute of an existing ``Longitude``
    object will result in re-wrapping the angle values in-place.

    The input angle(s) can be specified either as an array, list,
    scalar, tuple, string, :class:`~astropy.units.Quantity`
    or another :class:`~astropy.coordinates.Angle`.

    The input parser is flexible and supports all of the input formats
    supported by :class:`~astropy.coordinates.Angle`.

    Parameters
    ----------
    angle : tuple or angle-like
        The angle value(s). If a tuple, will be interpreted as ``(h, m, s)``
        or ``(d, m, s)`` depending on ``unit``. If a string, it will be
        interpreted following the rules described for
        :class:`~astropy.coordinates.Angle`.

        If ``angle`` is a sequence or array of strings, the resulting
        values will be in the given ``unit``, or if `None` is provided,
        the unit will be taken from the first given value.

    unit : unit-like ['angle'], optional
        The unit of the value specified for the angle.  This may be
        any string that `~astropy.units.Unit` understands, but it is
        better to give an actual unit object.  Must be an angular
        unit.

    wrap_angle : angle-like or None, optional
        Angle at which to wrap back to ``wrap_angle - 360 deg``.
        If ``None`` (default), it will be taken to be 360 deg unless ``angle``
        has a ``wrap_angle`` attribute already (i.e., is a ``Longitude``),
        in which case it will be taken from there.

    Raises
    ------
    `~astropy.units.UnitsError`
        If a unit is not provided or it is not an angular unit.
    `TypeError`
        If the angle parameter is an instance of
        :class:`~astropy.coordinates.Latitude`.
    """

    _wrap_angle = None
    _default_wrap_angle = Angle(360 * u.deg)
    info = LongitudeInfo()

    def __new__(cls, angle, unit=None, wrap_angle=None, **kwargs):
        # Forbid creating a Long from a Lat.
if isinstance(angle, Latitude): raise TypeError("A Longitude angle cannot be created from " "a Latitude angle.") self = super().__new__(cls, angle, unit=unit, **kwargs) if wrap_angle is None: wrap_angle = getattr(angle, 'wrap_angle', self._default_wrap_angle) self.wrap_angle = wrap_angle # angle-like b/c property setter return self def __setitem__(self, item, value): # Forbid assigning a Lat to a Long. if isinstance(value, Latitude): raise TypeError("A Latitude angle cannot be assigned to a Longitude angle") super().__setitem__(item, value) self._wrap_at(self.wrap_angle) @property def wrap_angle(self): return self._wrap_angle @wrap_angle.setter def wrap_angle(self, value): self._wrap_angle = Angle(value, copy=False) self._wrap_at(self.wrap_angle) def __array_finalize__(self, obj): super().__array_finalize__(obj) self._wrap_angle = getattr(obj, '_wrap_angle', self._default_wrap_angle) # Any calculation should drop to Angle def __array_ufunc__(self, *args, **kwargs): results = super().__array_ufunc__(*args, **kwargs) return _no_angle_subclass(results)
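
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above); the function name
# is hypothetical.  It demonstrates the wrapping and bounds behaviour
# implemented by Angle, Latitude and Longitude, with outputs taken from the
# docstrings above.
def _demo_angle_wrapping():
    import astropy.units as u

    a = Angle([-20.0, 150.0, 350.0] * u.deg)
    print(a.wrap_at(180 * u.deg).degree)     # [-20., 150., -10.]
    print(a.is_within_bounds('0d', '360d'))  # False, because of -20 deg

    lon = Longitude(370 * u.deg)             # wrapped into [0, 360) deg
    print(lon.degree)                        # 10.0

    try:
        Latitude(100 * u.deg)                # outside [-90, 90] deg
    except ValueError as err:
        print(err)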
d001322ad0b897bc4382eb3f4defe78bd27e7189324ed63e30eb385346cc4f6a
# Licensed under a 3-clause BSD style license - see LICENSE.rst

from types import FunctionType
from contextlib import contextmanager
from functools import wraps

from astropy.table import QTable

__all__ = ['BaseTimeSeries', 'autocheck_required_columns']

COLUMN_RELATED_METHODS = ['add_column',
                          'add_columns',
                          'keep_columns',
                          'remove_column',
                          'remove_columns',
                          'rename_column']


def autocheck_required_columns(cls):
    """
    This is a decorator that ensures that the table contains specific
    columns indicated by the _required_columns attribute. The aim is to
    decorate all methods that might affect the columns in the table and check
    for consistency after the methods have been run.
    """

    def decorator_method(method):

        @wraps(method)
        def wrapper(self, *args, **kwargs):
            result = method(self, *args, **kwargs)
            self._check_required_columns()
            return result

        return wrapper

    for name in COLUMN_RELATED_METHODS:
        if (not hasattr(cls, name) or
                not isinstance(getattr(cls, name), FunctionType)):
            raise ValueError(f"{name} is not a valid method")
        setattr(cls, name, decorator_method(getattr(cls, name)))

    return cls


class BaseTimeSeries(QTable):

    _required_columns = None
    _required_columns_enabled = True

    # If _required_column_relax is True, we don't require the columns to be
    # present but we do require them to be the correct ones IF present. Note
    # that this is a temporary state - as soon as the required columns
    # are all present, we toggle this to False
    _required_columns_relax = False

    def _check_required_columns(self):

        def as_scalar_or_list_str(obj):
            if not hasattr(obj, "__len__"):
                return f"'{obj}'"
            elif len(obj) == 1:
                return f"'{obj[0]}'"
            else:
                return str(obj)

        if not self._required_columns_enabled:
            return

        if self._required_columns is not None:

            if self._required_columns_relax:
                required_columns = self._required_columns[:len(self.colnames)]
            else:
                required_columns = self._required_columns

            plural = 's' if len(required_columns) > 1 else ''

            if not self._required_columns_relax and len(self.colnames) == 0:

                raise ValueError("{} object is invalid - expected '{}' "
                                 "as the first column{} but time series has no columns"
                                 .format(self.__class__.__name__, required_columns[0], plural))

            elif self.colnames[:len(required_columns)] != required_columns:

                raise ValueError("{} object is invalid - expected {} "
                                 "as the first column{} but found {}"
                                 .format(self.__class__.__name__,
                                         as_scalar_or_list_str(required_columns),
                                         plural,
                                         as_scalar_or_list_str(self.colnames[:len(required_columns)])))

            if (self._required_columns_relax
                    and self._required_columns == self.colnames[:len(self._required_columns)]):
                self._required_columns_relax = False

    @contextmanager
    def _delay_required_column_checks(self):
        self._required_columns_enabled = False
        yield
        self._required_columns_enabled = True
        self._check_required_columns()
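
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above).  `DemoSeries` and
# the function name are hypothetical; the sketch shows how the decorator
# enforces the leading required column.
def _demo_required_columns():
    from astropy.table import Column

    @autocheck_required_columns
    class DemoSeries(BaseTimeSeries):
        _required_columns = ['time']

    ts = DemoSeries()
    ts.add_column(Column([1, 2, 3], name='time'))  # passes the check
    # Adding e.g. a 'flux' column first instead would raise ValueError,
    # because the decorated add_column re-runs _check_required_columns().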
4524af84164587a5d7cf5f73ac6a3110df2df67358267d9d79650e922da881a4
# Licensed under a 3-clause BSD style license - see LICENSE.rst """This module contains dictionaries with sets of parameters for a given cosmology. Each cosmology has the following parameters defined: ========== ===================================== Oc0 Omega cold dark matter at z=0 Ob0 Omega baryon at z=0 Om0 Omega matter at z=0 flat Is this assumed flat? If not, Ode0 must be specified Ode0 Omega dark energy at z=0 if flat is False H0 Hubble parameter at z=0 in km/s/Mpc n Density perturbation spectral index Tcmb0 Current temperature of the CMB Neff Effective number of neutrino species m_nu Assumed mass of neutrino species, in eV. sigma8 Density perturbation amplitude tau Ionisation optical depth z_reion Redshift of hydrogen reionisation t0 Age of the universe in Gyr reference Reference for the parameters ========== ===================================== The list of cosmologies available are given by the tuple `available`. Current cosmologies available: Planck 2018 (Planck18) parameters from Planck Collaboration 2020, A&A, 641, A6 (Paper VI), Table 2 (TT, TE, EE + lowE + lensing + BAO) Planck 2015 (Planck15) parameters from Planck Collaboration 2016, A&A, 594, A13 (Paper XIII), Table 4 (TT, TE, EE + lowP + lensing + ext) Planck 2013 (Planck13) parameters from Planck Collaboration 2014, A&A, 571, A16 (Paper XVI), Table 5 (Planck + WP + highL + BAO) WMAP 9 year (WMAP9) parameters from Hinshaw et al. 2013, ApJS, 208, 19, doi: 10.1088/0067-0049/208/2/19. Table 4 (WMAP9 + eCMB + BAO + H0) WMAP 7 year (WMAP7) parameters from Komatsu et al. 2011, ApJS, 192, 18, doi: 10.1088/0067-0049/192/2/18. Table 1 (WMAP + BAO + H0 ML). WMAP 5 year (WMAP5) parameters from Komatsu et al. 2009, ApJS, 180, 330, doi: 10.1088/0067-0049/180/2/330. Table 1 (WMAP + BAO + SN ML). WMAP 3 year (WMAP3) parameters from Spergel et al. 2007, ApJS, 170, 377, doi: 10.1086/513700. Table 6. (WMAP + SNGold) Obtained from https://lambda.gsfc.nasa.gov/product/map/dr2/params/lcdm_wmap_sngold.cfm Tcmb0 and Neff are the standard values as also used for WMAP5, 7, 9. Pending WMAP team approval and subject to change. WMAP 1 year (WMAP1) parameters from Spergel et al. 2003, ApJS, 148, 175, doi: 10.1086/377226. Table 7 (WMAP + CBI + ACBAR + 2dFGRS + Lya) Tcmb0 and Neff are the standard values as also used for WMAP5, 7, 9. Pending WMAP team approval and subject to change. """ # STDLIB import sys from types import MappingProxyType # LOCAL from .realizations import available __all__ = ["available"] + list(available) def __getattr__(name): """Get parameters of cosmology representations with lazy import from `PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_. """ from astropy.cosmology import realizations cosmo = getattr(realizations, name) m = cosmo.to_format("mapping", cosmology_as_str=True, move_from_meta=True) proxy = MappingProxyType(m) # Cache in this module so `__getattr__` is only called once per `name`. setattr(sys.modules[__name__], name, proxy) return proxy def __dir__(): """Directory, including lazily-imported objects.""" return __all__
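
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above); the function name
# is hypothetical.  Each named mapping is built lazily on first attribute
# access via the module-level __getattr__ above.
def _demo_parameter_mapping():
    from astropy.cosmology import parameters

    planck18 = parameters.Planck18    # a read-only MappingProxyType
    print(sorted(planck18))           # parameter names, e.g. 'H0', 'Om0'
    print(planck18['H0'])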
62bd69ffe08766df04f9477574cfb3a624469b2c9b6e56d3685ee74abc026b10
# Licensed under a 3-clause BSD style license - see LICENSE.rst from astropy.cosmology import units as cu from astropy.io import registry as io_registry from astropy.units import add_enabled_units __all__ = ["CosmologyRead", "CosmologyWrite", "CosmologyFromFormat", "CosmologyToFormat"] __doctest_skip__ = __all__ # ============================================================================== # Read / Write readwrite_registry = io_registry.UnifiedIORegistry() class CosmologyRead(io_registry.UnifiedReadWrite): """Read and parse data to a `~astropy.cosmology.Cosmology`. This function provides the Cosmology interface to the Astropy unified I/O layer. This allows easily reading a file in supported data formats using syntax such as:: >>> from astropy.cosmology import Cosmology >>> cosmo1 = Cosmology.read('<file name>') When the ``read`` method is called from a subclass the subclass will provide a keyword argument ``cosmology=<class>`` to the registered read method. The method uses this cosmology class, regardless of the class indicated in the file, and sets parameters' default values from the class' signature. Get help on the available readers using the ``help()`` method:: >>> Cosmology.read.help() # Get help reading and list supported formats >>> Cosmology.read.help(format='<format>') # Get detailed help on a format >>> Cosmology.read.list_formats() # Print list of available formats See also: https://docs.astropy.org/en/stable/io/unified.html Parameters ---------- *args Positional arguments passed through to data reader. If supplied the first argument is typically the input filename. format : str (optional, keyword-only) File format specifier. **kwargs Keyword arguments passed through to data reader. Returns ------- out : `~astropy.cosmology.Cosmology` subclass instance `~astropy.cosmology.Cosmology` corresponding to file contents. Notes ----- """ def __init__(self, instance, cosmo_cls): super().__init__(instance, cosmo_cls, "read", registry=readwrite_registry) def __call__(self, *args, **kwargs): from astropy.cosmology.core import Cosmology # so subclasses can override, also pass the class as a kwarg. # allows for `FlatLambdaCDM.read` and # `Cosmology.read(..., cosmology=FlatLambdaCDM)` if self._cls is not Cosmology: kwargs.setdefault("cosmology", self._cls) # set, if not present # check that it is the correct cosmology, can be wrong if user # passes in e.g. `w0wzCDM.read(..., cosmology=FlatLambdaCDM)` valid = (self._cls, self._cls.__qualname__) if kwargs["cosmology"] not in valid: raise ValueError( "keyword argument `cosmology` must be either the class " f"{valid[0]} or its qualified name '{valid[1]}'") with add_enabled_units(cu): cosmo = self.registry.read(self._cls, *args, **kwargs) return cosmo class CosmologyWrite(io_registry.UnifiedReadWrite): """Write this Cosmology object out in the specified format. This function provides the Cosmology interface to the astropy unified I/O layer. This allows easily writing a file in supported data formats using syntax such as:: >>> from astropy.cosmology import Planck18 >>> Planck18.write('<file name>') Get help on the available writers for ``Cosmology`` using the ``help()`` method:: >>> Cosmology.write.help() # Get help writing and list supported formats >>> Cosmology.write.help(format='<format>') # Get detailed help on format >>> Cosmology.write.list_formats() # Print list of available formats Parameters ---------- *args Positional arguments passed through to data writer. If supplied the first argument is the output filename. 
format : str (optional, keyword-only) File format specifier. **kwargs Keyword arguments passed through to data writer. Notes ----- """ def __init__(self, instance, cls): super().__init__(instance, cls, "write", registry=readwrite_registry) def __call__(self, *args, **kwargs): self.registry.write(self._instance, *args, **kwargs) # ============================================================================== # Format Interchange # for transforming instances, e.g. Cosmology <-> dict convert_registry = io_registry.UnifiedIORegistry() class CosmologyFromFormat(io_registry.UnifiedReadWrite): """Transform object to a `~astropy.cosmology.Cosmology`. This function provides the Cosmology interface to the Astropy unified I/O layer. This allows easily parsing supported data formats using syntax such as:: >>> from astropy.cosmology import Cosmology >>> cosmo1 = Cosmology.from_format(cosmo_mapping, format='mapping') When the ``from_format`` method is called from a subclass the subclass will provide a keyword argument ``cosmology=<class>`` to the registered parser. The method uses this cosmology class, regardless of the class indicated in the data, and sets parameters' default values from the class' signature. Get help on the available readers using the ``help()`` method:: >>> Cosmology.from_format.help() # Get help and list supported formats >>> Cosmology.from_format.help('<format>') # Get detailed help on a format >>> Cosmology.from_format.list_formats() # Print list of available formats See also: https://docs.astropy.org/en/stable/io/unified.html Parameters ---------- obj : object The object to parse according to 'format' *args Positional arguments passed through to data parser. format : str or None, optional keyword-only Object format specifier. For `None` (default) CosmologyFromFormat tries to identify the correct format. **kwargs Keyword arguments passed through to data parser. Parsers should accept the following keyword arguments: - cosmology : the class (or string name thereof) to use / check when constructing the cosmology instance. Returns ------- out : `~astropy.cosmology.Cosmology` subclass instance `~astropy.cosmology.Cosmology` corresponding to ``obj`` contents. """ def __init__(self, instance, cosmo_cls): super().__init__(instance, cosmo_cls, "read", registry=convert_registry) def __call__(self, obj, *args, format=None, **kwargs): from astropy.cosmology.core import Cosmology # so subclasses can override, also pass the class as a kwarg. # allows for `FlatLambdaCDM.read` and # `Cosmology.read(..., cosmology=FlatLambdaCDM)` if self._cls is not Cosmology: kwargs.setdefault("cosmology", self._cls) # set, if not present # check that it is the correct cosmology, can be wrong if user # passes in e.g. `w0wzCDM.read(..., cosmology=FlatLambdaCDM)` valid = (self._cls, self._cls.__qualname__) if kwargs["cosmology"] not in valid: raise ValueError( "keyword argument `cosmology` must be either the class " f"{valid[0]} or its qualified name '{valid[1]}'") with add_enabled_units(cu): cosmo = self.registry.read(self._cls, obj, *args, format=format, **kwargs) return cosmo class CosmologyToFormat(io_registry.UnifiedReadWrite): """Transform this Cosmology to another format. This function provides the Cosmology interface to the astropy unified I/O layer. 
This allows easily transforming to supported data formats using syntax such as:: >>> from astropy.cosmology import Planck18 >>> Planck18.to_format("mapping") {'cosmology': astropy.cosmology.core.FlatLambdaCDM, 'name': 'Planck18', 'H0': <Quantity 67.66 km / (Mpc s)>, 'Om0': 0.30966, ... Get help on the available representations for ``Cosmology`` using the ``help()`` method:: >>> Cosmology.to_format.help() # Get help and list supported formats >>> Cosmology.to_format.help('<format>') # Get detailed help on format >>> Cosmology.to_format.list_formats() # Print list of available formats Parameters ---------- format : str Format specifier. *args Positional arguments passed through to data writer. If supplied the first argument is the output filename. **kwargs Keyword arguments passed through to data writer. """ def __init__(self, instance, cls): super().__init__(instance, cls, "write", registry=convert_registry) def __call__(self, format, *args, **kwargs): return self.registry.write(self._instance, None, *args, format=format, **kwargs)
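
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above); the function name
# is hypothetical.  It shows a round trip through the format-interchange
# machinery provided by CosmologyToFormat/CosmologyFromFormat.
def _demo_format_roundtrip():
    from astropy.cosmology import Cosmology, Planck18

    mapping = Planck18.to_format("mapping")   # Cosmology -> dict
    cosmo = Cosmology.from_format(mapping, format="mapping")
    assert cosmo == Planck18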
467139c92184b906932eb58928d47a3b09ecca5798e6e1a8f7ac6af1beb6f327
# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import annotations import abc import inspect from typing import Mapping, Optional, Set, Type, TypeVar import numpy as np from astropy.io.registry import UnifiedReadWriteMethod from astropy.utils.decorators import classproperty from astropy.utils.metadata import MetaData from .connect import CosmologyFromFormat, CosmologyRead, CosmologyToFormat, CosmologyWrite from .parameter import Parameter # Originally authored by Andrew Becker ([email protected]), # and modified by Neil Crighton ([email protected]), Roban Kramer # ([email protected]), and Nathaniel Starkman ([email protected]). # Many of these adapted from Hogg 1999, astro-ph/9905116 # and Linder 2003, PRL 90, 91301 __all__ = ["Cosmology", "CosmologyError", "FlatCosmologyMixin"] __doctest_requires__ = {} # needed until __getattr__ removed ############################################################################## # Parameters # registry of cosmology classes with {key=name : value=class} _COSMOLOGY_CLASSES = dict() # typing _CosmoT = TypeVar("_CosmoT", bound="Cosmology") _FlatCosmoT = TypeVar("_FlatCosmoT", bound="FlatCosmologyMixin") ############################################################################## class CosmologyError(Exception): pass class Cosmology(metaclass=abc.ABCMeta): """Base-class for all Cosmologies. Parameters ---------- *args Arguments into the cosmology; used by subclasses, not this base class. name : str or None (optional, keyword-only) The name of the cosmology. meta : dict or None (optional, keyword-only) Metadata for the cosmology, e.g., a reference. **kwargs Arguments into the cosmology; used by subclasses, not this base class. Notes ----- Class instances are static -- you cannot (and should not) change the values of the parameters. That is, all of the above attributes (except meta) are read only. For details on how to create performant custom subclasses, see the documentation on :ref:`astropy-cosmology-fast-integrals`. """ meta = MetaData() # Unified I/O object interchange methods from_format = UnifiedReadWriteMethod(CosmologyFromFormat) to_format = UnifiedReadWriteMethod(CosmologyToFormat) # Unified I/O read and write methods read = UnifiedReadWriteMethod(CosmologyRead) write = UnifiedReadWriteMethod(CosmologyWrite) # Parameters __parameters__ = () __all_parameters__ = () # --------------------------------------------------------------- def __init_subclass__(cls): super().__init_subclass__() # ------------------- # Parameters # Get parameters that are still Parameters, either in this class or above. 
parameters = [] derived_parameters = [] for n in cls.__parameters__: p = getattr(cls, n) if isinstance(p, Parameter): derived_parameters.append(n) if p.derived else parameters.append(n) # Add new parameter definitions for n, v in cls.__dict__.items(): if n in parameters or n.startswith("_") or not isinstance(v, Parameter): continue derived_parameters.append(n) if v.derived else parameters.append(n) # reorder to match signature ordered = [parameters.pop(parameters.index(n)) for n in cls._init_signature.parameters.keys() if n in parameters] parameters = ordered + parameters # place "unordered" at the end cls.__parameters__ = tuple(parameters) cls.__all_parameters__ = cls.__parameters__ + tuple(derived_parameters) # ------------------- # register as a Cosmology subclass _COSMOLOGY_CLASSES[cls.__qualname__] = cls @classproperty(lazy=True) def _init_signature(cls): """Initialization signature (without 'self').""" # get signature, dropping "self" by taking arguments [1:] sig = inspect.signature(cls.__init__) sig = sig.replace(parameters=list(sig.parameters.values())[1:]) return sig # --------------------------------------------------------------- def __init__(self, name=None, meta=None): self._name = str(name) if name is not None else name self.meta.update(meta or {}) @property def name(self): """The name of the Cosmology instance.""" return self._name @property @abc.abstractmethod def is_flat(self): """ Return bool; `True` if the cosmology is flat. This is abstract and must be defined in subclasses. """ raise NotImplementedError("is_flat is not implemented") def clone(self, *, meta=None, **kwargs): """Returns a copy of this object with updated parameters, as specified. This cannot be used to change the type of the cosmology, so ``clone()`` cannot be used to change between flat and non-flat cosmologies. Parameters ---------- meta : mapping or None (optional, keyword-only) Metadata that will update the current metadata. **kwargs Cosmology parameter (and name) modifications. If any parameter is changed and a new name is not given, the name will be set to "[old name] (modified)". Returns ------- newcosmo : `~astropy.cosmology.Cosmology` subclass instance A new instance of this class with updated parameters as specified. If no arguments are given, then a reference to this object is returned instead of copy. Examples -------- To make a copy of the ``Planck13`` cosmology with a different matter density (``Om0``), and a new name: >>> from astropy.cosmology import Planck13 >>> Planck13.clone(name="Modified Planck 2013", Om0=0.35) FlatLambdaCDM(name="Modified Planck 2013", H0=67.77 km / (Mpc s), Om0=0.35, ... If no name is specified, the new name will note the modification. >>> Planck13.clone(Om0=0.35).name 'Planck13 (modified)' """ # Quick return check, taking advantage of the Cosmology immutability. if meta is None and not kwargs: return self # There are changed parameter or metadata values. # The name needs to be changed accordingly, if it wasn't already. _modname = self.name + " (modified)" kwargs.setdefault("name", (_modname if self.name is not None else None)) # mix new meta into existing, preferring the former. meta = meta if meta is not None else {} new_meta = {**self.meta, **meta} # Mix kwargs into initial arguments, preferring the former. new_init = {**self._init_arguments, "meta": new_meta, **kwargs} # Create BoundArgument to handle args versus kwargs. 
# This also handles all errors from mismatched arguments ba = self._init_signature.bind_partial(**new_init) # Instantiate, respecting args vs kwargs cloned = type(self)(*ba.args, **ba.kwargs) # Check if nothing has changed. # TODO! or should return self? if (cloned.name == _modname) and not meta and cloned.is_equivalent(self): cloned._name = self.name return cloned @property def _init_arguments(self): # parameters kw = {n: getattr(self, n) for n in self.__parameters__} # other info kw["name"] = self.name kw["meta"] = self.meta return kw # --------------------------------------------------------------- # comparison methods def is_equivalent(self, other, *, format=False): r"""Check equivalence between Cosmologies. Two cosmologies may be equivalent even if not the same class. For example, an instance of ``LambdaCDM`` might have :math:`\Omega_0=1` and :math:`\Omega_k=0` and therefore be flat, like ``FlatLambdaCDM``. Parameters ---------- other : `~astropy.cosmology.Cosmology` subclass instance The object in which to compare. format : bool or None or str, optional keyword-only Whether to allow, before equivalence is checked, the object to be converted to a |Cosmology|. This allows, e.g. a |Table| to be equivalent to a Cosmology. `False` (default) will not allow conversion. `True` or `None` will, and will use the auto-identification to try to infer the correct format. A `str` is assumed to be the correct format to use when converting. Returns ------- bool True if cosmologies are equivalent, False otherwise. Examples -------- Two cosmologies may be equivalent even if not of the same class. In this examples the ``LambdaCDM`` has ``Ode0`` set to the same value calculated in ``FlatLambdaCDM``. >>> import astropy.units as u >>> from astropy.cosmology import LambdaCDM, FlatLambdaCDM >>> cosmo1 = LambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, 0.7) >>> cosmo2 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3) >>> cosmo1.is_equivalent(cosmo2) True While in this example, the cosmologies are not equivalent. >>> cosmo3 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, Tcmb0=3 * u.K) >>> cosmo3.is_equivalent(cosmo2) False Also, using the keyword argument, the notion of equivalence is extended to any Python object that can be converted to a |Cosmology|. >>> from astropy.cosmology import Planck18 >>> tbl = Planck18.to_format("astropy.table") >>> Planck18.is_equivalent(tbl, format=True) True The list of valid formats, e.g. the |Table| in this example, may be checked with ``Cosmology.from_format.list_formats()``. As can be seen in the list of formats, not all formats can be auto-identified by ``Cosmology.from_format.registry``. Objects of these kinds can still be checked for equivalence, but the correct format string must be used. >>> tbl = Planck18.to_format("yaml") >>> Planck18.is_equivalent(tbl, format="yaml") True """ # Allow for different formats to be considered equivalent. if format is not False: format = None if format is True else format # str->str, None/True->None try: other = Cosmology.from_format(other, format=format) except Exception: # TODO! should enforce only TypeError return False # The options are: 1) same class & parameters; 2) same class, different # parameters; 3) different classes, equivalent parameters; 4) different # classes, different parameters. (1) & (3) => True, (2) & (4) => False. 
        equiv = self.__equiv__(other)
        if equiv is NotImplemented and hasattr(other, "__equiv__"):
            equiv = other.__equiv__(self)  # that failed, try from 'other'

        return equiv if equiv is not NotImplemented else False

    def __equiv__(self, other):
        """Cosmology equivalence. Use ``.is_equivalent()`` for actual check!

        Parameters
        ----------
        other : `~astropy.cosmology.Cosmology` subclass instance
            The object in which to compare.

        Returns
        -------
        bool or `NotImplemented`
            `NotImplemented` if 'other' is from a different class.
            `True` if 'other' is of the same class and has matching parameters
            and parameter values. `False` otherwise.
        """
        if other.__class__ is not self.__class__:
            return NotImplemented  # allows other.__equiv__

        # check all parameters in 'other' match those in 'self' and 'other' has
        # no extra parameters (latter part should never happen b/c same class)
        params_eq = (set(self.__all_parameters__) == set(other.__all_parameters__)
                     and all(np.all(getattr(self, k) == getattr(other, k))
                             for k in self.__all_parameters__))
        return params_eq

    def __eq__(self, other):
        """Check equality between Cosmologies.

        Checks the Parameters and immutable fields (i.e. not "meta").

        Parameters
        ----------
        other : `~astropy.cosmology.Cosmology` subclass instance
            The object in which to compare.

        Returns
        -------
        bool
            `True` if Parameters and names are the same, `False` otherwise.
        """
        if other.__class__ is not self.__class__:
            return NotImplemented  # allows other.__eq__

        # check all parameters in 'other' match those in 'self'
        equivalent = self.__equiv__(other)

        # non-Parameter checks: name
        name_eq = (self.name == other.name)

        return equivalent and name_eq

    # ---------------------------------------------------------------

    def __repr__(self):
        namelead = f"{self.__class__.__qualname__}("
        if self.name is not None:
            namelead += f"name=\"{self.name}\", "
        # nicely formatted parameters
        fmtps = (f'{k}={getattr(self, k)}' for k in self.__parameters__)

        return namelead + ", ".join(fmtps) + ")"

    def __astropy_table__(self, cls, copy, **kwargs):
        """Return a `~astropy.table.Table` of type ``cls``.

        Parameters
        ----------
        cls : type
            Astropy ``Table`` class or subclass.
        copy : bool
            Ignored.
        **kwargs : dict, optional
            Additional keyword arguments. Passed to ``self.to_format()``.
            See ``Cosmology.to_format.help("astropy.table")`` for allowed kwargs.

        Returns
        -------
        `astropy.table.Table` or subclass instance
            Instance of type ``cls``.
        """
        return self.to_format("astropy.table", cls=cls, **kwargs)


class FlatCosmologyMixin(metaclass=abc.ABCMeta):
    """
    Mixin class for flat cosmologies. Do NOT instantiate directly.

    Note that all instances of ``FlatCosmologyMixin`` are flat, but not all
    flat cosmologies are instances of ``FlatCosmologyMixin``. As example,
    ``LambdaCDM`` **may** be flat (for a specific set of parameter values),
    but ``FlatLambdaCDM`` **will** be flat.
    """

    def __init_subclass__(cls: Type[_FlatCosmoT]) -> None:
        super().__init_subclass__()

        # Determine the non-flat class.
        # This will raise a TypeError if the MRO is inconsistent.
        cls._nonflat_cls_

    # ===============================================================

    @classmethod  # TODO! make metaclass-method
    def _get_nonflat_cls(cls, kls: Optional[Type[_CosmoT]]=None) -> Optional[Type[Cosmology]]:
        """Find the corresponding non-flat class.

        The class' bases are searched recursively.

        Parameters
        ----------
        kls : :class:`astropy.cosmology.Cosmology` class or None, optional
            If `None` (default) this class is searched instead of `kls`.
Raises ------ TypeError If more than one non-flat class is found at the same level of the inheritance. This is similar to the error normally raised by Python for an inconsistent method resolution order. Returns ------- type A :class:`Cosmology` subclass this class inherits from that is not a :class:`FlatCosmologyMixin` subclass. """ _kls = cls if kls is None else kls # Find non-flat classes nonflat: Set[Type[Cosmology]] nonflat = {b for b in _kls.__bases__ if issubclass(b, Cosmology) and not issubclass(b, FlatCosmologyMixin)} if not nonflat: # e.g. subclassing FlatLambdaCDM nonflat = {k for b in _kls.__bases__ if (k := cls._get_nonflat_cls(b)) is not None} if len(nonflat) > 1: raise TypeError( f"cannot create a consistent non-flat class resolution order " f"for {_kls} with bases {nonflat} at the same inheritance level." ) if not nonflat: # e.g. FlatFLRWMixin(FlatCosmologyMixin) return None return nonflat.pop() _nonflat_cls_ = classproperty(_get_nonflat_cls, lazy=True, doc="Return the corresponding non-flat class.") # =============================================================== @property def is_flat(self): """Return `True`, the cosmology is flat.""" return True @abc.abstractmethod def nonflat(self: _FlatCosmoT) -> _CosmoT: """Return the equivalent non-flat-class instance of this cosmology.""" def clone(self, *, meta: Optional[Mapping] = None, to_nonflat: bool = False, **kwargs): """Returns a copy of this object with updated parameters, as specified. This cannot be used to change the type of the cosmology, except for changing to the non-flat version of this cosmology. Parameters ---------- meta : mapping or None (optional, keyword-only) Metadata that will update the current metadata. to_nonflat : bool, optional keyword-only Whether to change to the non-flat version of this cosmology. **kwargs Cosmology parameter (and name) modifications. If any parameter is changed and a new name is not given, the name will be set to "[old name] (modified)". Returns ------- newcosmo : `~astropy.cosmology.Cosmology` subclass instance A new instance of this class with updated parameters as specified. If no arguments are given, then a reference to this object is returned instead of copy. Examples -------- To make a copy of the ``Planck13`` cosmology with a different matter density (``Om0``), and a new name: >>> from astropy.cosmology import Planck13 >>> Planck13.clone(name="Modified Planck 2013", Om0=0.35) FlatLambdaCDM(name="Modified Planck 2013", H0=67.77 km / (Mpc s), Om0=0.35, ... If no name is specified, the new name will note the modification. >>> Planck13.clone(Om0=0.35).name 'Planck13 (modified)' The keyword 'to_nonflat' can be used to clone on the non-flat equivalent cosmology. >>> Planck13.clone(to_nonflat=True) LambdaCDM(name="Planck13", ... >>> Planck13.clone(H0=70, to_nonflat=True) LambdaCDM(name="Planck13 (modified)", H0=70.0 km / (Mpc s), ... """ if to_nonflat: return self.nonflat.clone(meta=meta, **kwargs) return super().clone(meta=meta, **kwargs) # ----------------------------------------------------------------------------- def __getattr__(attr): from . import flrw if hasattr(flrw, attr) and attr not in ("__path__", ): import warnings from astropy.utils.exceptions import AstropyDeprecationWarning warnings.warn( f"`astropy.cosmology.core.{attr}` has been moved (since v5.0) and " f"should be imported as ``from astropy.cosmology import {attr}``." 
" In future this will raise an exception.", AstropyDeprecationWarning ) return getattr(flrw, attr) raise AttributeError(f"module {__name__!r} has no attribute {attr!r}.")
3f29c376d6aee15921ab5b884ff0423e42ea11facdd4a206a993e6dbb719283b
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ astropy.cosmology contains classes and functions for cosmological distance measures and other cosmology-related calculations. See the `Astropy documentation <https://docs.astropy.org/en/latest/cosmology/index.html>`_ for more detailed usage examples and references. """ from . import core, flrw, funcs, parameter, units, utils # noqa F401 from . import io # needed before 'realizations' # noqa: F401 # isort: split from . import realizations from .core import * # noqa F401, F403 from .flrw import * # noqa F401, F403 from .funcs import * # noqa F401, F403 from .parameter import * # noqa F401, F403 from .realizations import available, default_cosmology # noqa F401, F403 from .utils import * # noqa F401, F403 __all__ = (core.__all__ + flrw.__all__ # cosmology classes + realizations.__all__ # instances thereof + ["units"] + funcs.__all__ + parameter.__all__ + utils.__all__) # utils def __getattr__(name): """Get realizations using lazy import from `PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_. Raises ------ AttributeError If "name" is not in :mod:`astropy.cosmology.realizations` """ if name not in available: raise AttributeError(f"module {__name__!r} has no attribute {name!r}.") return getattr(realizations, name) def __dir__(): """Directory, including lazily-imported objects.""" return __all__
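
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above); the function name
# is hypothetical.  Realizations such as Planck18 are resolved lazily through
# the module-level __getattr__ defined above (PEP 562).
def _demo_lazy_realizations():
    import astropy.cosmology as cosmology

    print(cosmology.available)       # names of the bundled realizations
    planck18 = cosmology.Planck18    # triggers the lazy lookup
    print(planck18.H0)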
fe056b4e8cd30de6e93deb75d3230d34f98e94027bea2dcf92a50a7a332e0195
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import copy

import astropy.units as u
from astropy.utils.decorators import deprecated_attribute, deprecated_renamed_argument

__all__ = ["Parameter"]


class Parameter:
    r"""Cosmological parameter (descriptor).

    Should only be used with a :class:`~astropy.cosmology.Cosmology` subclass.

    Parameters
    ----------
    derived : bool (optional, keyword-only)
        Whether the Parameter is 'derived', default `False`.
        Derived parameters behave similarly to normal parameters, but are not
        sorted by the |Cosmology| signature (probably not there) and are not
        included in all methods. For reference, see ``Ode0`` in
        ``FlatFLRWMixin``, which removes :math:`\Omega_{de,0}` as an
        independent parameter (:math:`\Omega_{de,0} \equiv 1 - \Omega_{tot}`).
    unit : unit-like or None (optional, keyword-only)
        The `~astropy.units.Unit` for the Parameter. If None (default) no
        unit is assumed.
    equivalencies : `~astropy.units.Equivalency` or sequence thereof
        Unit equivalencies for this Parameter.
    fvalidate : callable[[object, object, Any], Any] or str (optional, keyword-only)
        Function to validate the Parameter value from instances of the
        cosmology class. If "default", uses default validator to assign units
        (with equivalencies), if Parameter has units.
        For other valid string options, see ``Parameter._registry_validators``.
        'fvalidate' can also be set through a decorator with
        :meth:`~astropy.cosmology.Parameter.validator`.
    fmt : str (optional, keyword-only)
        `format` specification, used when making string representation
        of the containing Cosmology.
        See https://docs.python.org/3/library/string.html#formatspec

        .. deprecated:: 5.1

    doc : str or None (optional, keyword-only)
        Parameter description.

    Examples
    --------
    For worked examples see :class:`~astropy.cosmology.FLRW`.
    """

    _registry_validators = {}

    @deprecated_renamed_argument("fmt", None, since="5.1")
    def __init__(self, *, derived=False, unit=None, equivalencies=[],
                 fvalidate="default", fmt="", doc=None):
        # attribute name on container cosmology class.
        # really set in __set_name__, but if Parameter is not init'ed as a
        # descriptor this ensures that the attributes exist.
        self._attr_name = self._attr_name_private = None

        self._derived = derived
        self._format_spec = str(fmt)  # deprecated.
        self.__doc__ = doc

        # units stuff
        self._unit = u.Unit(unit) if unit is not None else None
        self._equivalencies = equivalencies

        # Parse registered `fvalidate`
        self._fvalidate_in = fvalidate  # Always store input fvalidate.
        if callable(fvalidate):
            pass
        elif fvalidate in self._registry_validators:
            fvalidate = self._registry_validators[fvalidate]
        elif isinstance(fvalidate, str):
            raise ValueError("`fvalidate`, if str, must be in "
                             f"{self._registry_validators.keys()}")
        else:
            raise TypeError("`fvalidate` must be a function or "
                            f"{self._registry_validators.keys()}")
        self._fvalidate = fvalidate

    def __set_name__(self, cosmo_cls, name):
        # attribute name on container cosmology class
        self._attr_name = name
        self._attr_name_private = "_" + name

    @property
    def name(self):
        """Parameter name."""
        return self._attr_name

    @property
    def unit(self):
        """Parameter unit."""
        return self._unit

    @property
    def equivalencies(self):
        """Equivalencies used when initializing Parameter."""
        return self._equivalencies

    format_spec = deprecated_attribute("format_spec", since="5.1")

    @property
    def derived(self):
        """Whether the Parameter is derived (not an independent parameter)."""
        return self._derived

    # -------------------------------------------
    # descriptor and property-like methods

    def __get__(self, cosmology, cosmo_cls=None):
        # Get from class
        if cosmology is None:
            return self
        # Get from instance
        return getattr(cosmology, self._attr_name_private)

    def __set__(self, cosmology, value):
        """Allow attribute setting once. Raises AttributeError subsequently."""
        # Raise error if setting 2nd time.
        if hasattr(cosmology, self._attr_name_private):
            raise AttributeError(f"can't set attribute {self._attr_name} again")

        # Validate value, generally setting units if present
        value = self.validate(cosmology, copy.deepcopy(value))

        # Make the value read-only, if ndarray-like
        if hasattr(value, "setflags"):
            value.setflags(write=False)

        # Set the value on the cosmology
        setattr(cosmology, self._attr_name_private, value)

    # -------------------------------------------
    # validate value

    @property
    def fvalidate(self):
        """Function to validate a potential value of this Parameter."""
        return self._fvalidate

    def validator(self, fvalidate):
        """Make a new Parameter with a custom ``fvalidate``.

        Note: ``Parameter.validator`` must be the top-most descriptor decorator.

        Parameters
        ----------
        fvalidate : callable[[type, type, Any], Any]

        Returns
        -------
        `~astropy.cosmology.Parameter`
            Copy of this Parameter but with custom ``fvalidate``.
        """
        return self.clone(fvalidate=fvalidate)

    def validate(self, cosmology, value):
        """Run the validator on this Parameter.

        Parameters
        ----------
        cosmology : `~astropy.cosmology.Cosmology` instance
        value : Any
            The object to validate.

        Returns
        -------
        Any
            The output of calling ``fvalidate(cosmology, self, value)``
            (yes, that parameter order).
        """
        return self.fvalidate(cosmology, self, value)

    @classmethod
    def register_validator(cls, key, fvalidate=None):
        """Decorator to register a new kind of validator function.

        Parameters
        ----------
        key : str
        fvalidate : callable[[object, object, Any], Any] or None, optional
            Value validation function.

        Returns
        -------
        ``validator`` or callable[``validator``]
            If ``fvalidate`` is None, returns a function that takes and
            registers a validator. This allows ``register_validator`` to be
            used as a decorator.
        """
        if key in cls._registry_validators:
            raise KeyError(f"validator {key!r} already registered with Parameter.")

        # fvalidate directly passed
        if fvalidate is not None:
            cls._registry_validators[key] = fvalidate
            return fvalidate

        # for use as a decorator
        def register(fvalidate):
            """Register validator function.

            Parameters
            ----------
            fvalidate : callable[[object, object, Any], Any]
                Validation function.
            Returns
            -------
            ``validator``
            """
            cls._registry_validators[key] = fvalidate
            return fvalidate

        return register

    # -------------------------------------------

    def _get_init_arguments(self, processed=False):
        """Initialization arguments.

        Parameters
        ----------
        processed : bool
            Whether to more closely reproduce the input arguments (`False`,
            default) or the processed arguments (`True`). The former is better
            for string representations and round-tripping with
            ``eval(repr())``.

        Returns
        -------
        dict[str, Any]
        """
        # The keys are added in this order because `repr` prints them in order.
        kw = {"derived": self.derived,
              "unit": self.unit,
              "equivalencies": self.equivalencies,
              # Validator is always turned into a function, but for ``repr``
              # it's nice to know if it was originally a string.
              "fvalidate": self.fvalidate if processed else self._fvalidate_in,
              "doc": self.__doc__}

        # fmt will issue a deprecation warning if passed, so it is only passed
        # if it's not the default.
        if self._format_spec:
            kw["fmt"] = self._format_spec

        return kw

    def clone(self, **kw):
        """Clone this `Parameter`, changing any constructor argument.

        Parameters
        ----------
        **kw
            Passed to constructor. The current values, e.g. ``fvalidate``, are
            used as the default values, so an empty ``**kw`` is an exact copy.

        Examples
        --------
        >>> p = Parameter()
        >>> p
        Parameter(derived=False, unit=None, equivalencies=[], fvalidate='default', doc=None)

        >>> p.clone(unit="km")
        Parameter(derived=False, unit=Unit("km"), equivalencies=[], fvalidate='default', doc=None)
        """
        # Start with defaults, update from kw.
        kwargs = {**self._get_init_arguments(), **kw}
        # All initialization failures, like incorrect input, are handled by
        # ``__init__``.
        cloned = type(self)(**kwargs)
        # Transfer over the __set_name__ stuff. If `clone` is used to make a
        # new descriptor, __set_name__ will be called again, overwriting this.
        cloned._attr_name = self._attr_name
        cloned._attr_name_private = self._attr_name_private

        return cloned

    def __eq__(self, other):
        """Check Parameter equality. Only equal to other Parameter objects.

        Returns
        -------
        NotImplemented or True
            `True` if equal, `NotImplemented` otherwise. This allows `other`
            to be checked for equality with ``other.__eq__``.

        Examples
        --------
        >>> p1, p2 = Parameter(unit="km"), Parameter(unit="km")
        >>> p1 == p2
        True

        >>> p3 = Parameter(unit="km / s")
        >>> p3 == p1
        False

        >>> p1 != 2
        True
        """
        if not isinstance(other, Parameter):
            return NotImplemented
        # Check equality on all `_init_arguments` & `name`.
        # Need to compare the processed arguments because the inputs are many-
        # to-one, e.g. `fvalidate` can be a string or the equivalent function.
        return ((self._get_init_arguments(True) == other._get_init_arguments(True))
                and (self.name == other.name))

    def __repr__(self):
        """String representation.

        ``eval(repr())`` should work, depending on whether contents like
        ``fvalidate`` can be similarly round-tripped.
        """
        return "Parameter({})".format(", ".join(f"{k}={v!r}" for k, v in
                                                self._get_init_arguments().items()))


# ===================================================================
# Built-in validators


@Parameter.register_validator("default")
def _validate_with_unit(cosmology, param, value):
    """
    Default Parameter value validator.
    Adds/converts units if Parameter has a unit.
""" if param.unit is not None: with u.add_enabled_equivalencies(param.equivalencies): value = u.Quantity(value, param.unit) return value @Parameter.register_validator("float") def _validate_to_float(cosmology, param, value): """Parameter value validator with units, and converted to float.""" value = _validate_with_unit(cosmology, param, value) return float(value) @Parameter.register_validator("scalar") def _validate_to_scalar(cosmology, param, value): """""" value = _validate_with_unit(cosmology, param, value) if not value.isscalar: raise ValueError(f"{param.name} is a non-scalar quantity") return value @Parameter.register_validator("non-negative") def _validate_non_negative(cosmology, param, value): """Parameter value validator where value is a positive float.""" value = _validate_to_float(cosmology, param, value) if value < 0.0: raise ValueError(f"{param.name} cannot be negative.") return value
42007e0caf9620a5c763fe308563fb85342e0fc43c089803cbd67dcd4f2328b1
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Convenience functions for `astropy.cosmology`. """ import warnings import numpy as np from astropy.units import Quantity from astropy.utils.exceptions import AstropyUserWarning from . import units as cu from .core import CosmologyError __all__ = ['z_at_value'] __doctest_requires__ = {'*': ['scipy']} def _z_at_scalar_value(func, fval, zmin=1e-8, zmax=1000, ztol=1e-8, maxfun=500, method='Brent', bracket=None, verbose=False): """ Find the redshift ``z`` at which ``func(z) = fval``. See :func:`astropy.cosmology.funcs.z_at_value`. """ from scipy.optimize import minimize_scalar opt = {'maxiter': maxfun} # Assume custom methods support the same options as default; otherwise user # will see warnings. if str(method).lower() == 'bounded': opt['xatol'] = ztol if bracket is not None: warnings.warn(f"Option 'bracket' is ignored by method {method}.") bracket = None else: opt['xtol'] = ztol # fval falling inside the interval of bracketing function values does not # guarantee it has a unique solution, but for Standard Cosmological # quantities normally should (being monotonic or having a single extremum). # In these cases keep solver from returning solutions outside of bracket. fval_zmin, fval_zmax = func(zmin), func(zmax) nobracket = False if np.sign(fval - fval_zmin) != np.sign(fval_zmax - fval): if bracket is None: nobracket = True else: fval_brac = func(np.asanyarray(bracket)) if np.sign(fval - fval_brac[0]) != np.sign(fval_brac[-1] - fval): nobracket = True else: zmin, zmax = bracket[0], bracket[-1] fval_zmin, fval_zmax = fval_brac[[0, -1]] if nobracket: warnings.warn(f"fval is not bracketed by func(zmin)={fval_zmin} and " f"func(zmax)={fval_zmax}. This means either there is no " "solution, or that there is more than one solution " "between zmin and zmax satisfying fval = func(z).", AstropyUserWarning) if isinstance(fval_zmin, Quantity): val = fval.to_value(fval_zmin.unit) else: val = fval # 'Brent' and 'Golden' ignore `bounds`, force solution inside zlim def f(z): if z > zmax: return 1.e300 * (1.0 + z - zmax) elif z < zmin: return 1.e300 * (1.0 + zmin - z) elif isinstance(fval_zmin, Quantity): return abs(func(z).value - val) else: return abs(func(z) - val) res = minimize_scalar(f, method=method, bounds=(zmin, zmax), bracket=bracket, options=opt) # Scipy docs state that `OptimizeResult` always has 'status' and 'message' # attributes, but only `_minimize_scalar_bounded()` seems to have really # implemented them. if not res.success: warnings.warn(f"Solver returned {res.get('status')}: {res.get('message', 'Unsuccessful')}\n" f"Precision {res.fun} reached after {res.nfev} function calls.", AstropyUserWarning) if verbose: print(res) if np.allclose(res.x, zmax): raise CosmologyError( f"Best guess z={res.x} is very close to the upper z limit {zmax}." "\nTry re-running with a different zmax.") elif np.allclose(res.x, zmin): raise CosmologyError( f"Best guess z={res.x} is very close to the lower z limit {zmin}." "\nTry re-running with a different zmin.") return res.x def z_at_value(func, fval, zmin=1e-8, zmax=1000, ztol=1e-8, maxfun=500, method='Brent', bracket=None, verbose=False): """Find the redshift ``z`` at which ``func(z) = fval``. This finds the redshift at which one of the cosmology functions or methods (for example Planck13.distmod) is equal to a known value. .. warning:: Make sure you understand the behavior of the function that you are trying to invert! Depending on the cosmology, there may not be a unique solution. 
For example, in the standard Lambda CDM cosmology, there are two redshifts which give an angular diameter distance of 1500 Mpc, z ~ 0.7 and z ~ 3.8. To force ``z_at_value`` to find the solution you are interested in, use the ``zmin`` and ``zmax`` keywords to limit the search range (see the example below). Parameters ---------- func : function or method A function that takes a redshift as input. fval : `~astropy.units.Quantity` The (scalar or array) value of ``func(z)`` to recover. zmin : float or array-like['dimensionless'] or quantity-like, optional The lower search limit for ``z``. Beware of divergences in some cosmological functions, such as distance moduli, at z=0 (default 1e-8). zmax : float or array-like['dimensionless'] or quantity-like, optional The upper search limit for ``z`` (default 1000). ztol : float or array-like['dimensionless'], optional The relative error in ``z`` acceptable for convergence. maxfun : int or array-like, optional The maximum number of function evaluations allowed in the optimization routine (default 500). method : str or callable, optional Type of solver to pass to the minimizer. The built-in options provided by :func:`~scipy.optimize.minimize_scalar` are 'Brent' (default), 'Golden' and 'Bounded' with names case insensitive - see documentation there for details. It also accepts a custom solver by passing any user-provided callable object that meets the requirements listed therein under the Notes on "Custom minimizers" - or in more detail in :doc:`scipy:tutorial/optimize` - although their use is currently untested. .. versionadded:: 4.3 bracket : sequence or object array[sequence], optional For methods 'Brent' and 'Golden', ``bracket`` defines the bracketing interval and can either have three items (z1, z2, z3) so that z1 < z2 < z3 and ``func(z2) < func (z1), func(z3)`` or two items z1 and z3 which are assumed to be a starting interval for a downhill bracket search. For non-monotonic functions such as angular diameter distance this may be used to start the search on the desired side of the maximum, but see Examples below for usage notes. .. versionadded:: 4.3 verbose : bool, optional Print diagnostic output from solver (default `False`). .. versionadded:: 4.3 Returns ------- z : `~astropy.units.Quantity` ['redshift'] The redshift ``z`` satisfying ``zmin < z < zmax`` and ``func(z) = fval`` within ``ztol``. Has units of cosmological redshift. Warns ----- :class:`~astropy.utils.exceptions.AstropyUserWarning` If ``fval`` is not bracketed by ``func(zmin)=fval(zmin)`` and ``func(zmax)=fval(zmax)``. If the solver was not successful. Raises ------ :class:`astropy.cosmology.CosmologyError` If the result is very close to either ``zmin`` or ``zmax``. ValueError If ``bracket`` is not an array nor a 2 (or 3) element sequence. TypeError If ``bracket`` is not an object array. 2 (or 3) element sequences will be turned into object arrays, so this error should only occur if a non-object array is used for ``bracket``. Notes ----- This works for any arbitrary input cosmology, but is inefficient if you want to invert a large number of values for the same cosmology. In this case, it is faster to instead generate an array of values at many closely-spaced redshifts that cover the relevant redshift range, and then use interpolation to find the redshift at each value you are interested in. 
For example, to efficiently find the redshifts corresponding to 10^6 values of the distance modulus in a Planck13 cosmology, you could do the following: >>> import astropy.units as u >>> from astropy.cosmology import Planck13, z_at_value Generate 10^6 distance moduli between 24 and 44 for which we want to find the corresponding redshifts: >>> Dvals = (24 + np.random.rand(1000000) * 20) * u.mag Make a grid of distance moduli covering the redshift range we need using 50 equally log-spaced values between zmin and zmax. We use log spacing to adequately sample the steep part of the curve at low distance moduli: >>> zmin = z_at_value(Planck13.distmod, Dvals.min()) >>> zmax = z_at_value(Planck13.distmod, Dvals.max()) >>> zgrid = np.geomspace(zmin, zmax, 50) >>> Dgrid = Planck13.distmod(zgrid) Finally interpolate to find the redshift at each distance modulus: >>> zvals = np.interp(Dvals.value, Dgrid.value, zgrid) Examples -------- >>> import astropy.units as u >>> from astropy.cosmology import Planck13, Planck18, z_at_value The age and lookback time are monotonic with redshift, and so a unique solution can be found: >>> z_at_value(Planck13.age, 2 * u.Gyr) # doctest: +FLOAT_CMP <Quantity 3.19812268 redshift> The angular diameter is not monotonic however, and there are two redshifts that give a value of 1500 Mpc. You can use the zmin and zmax keywords to find the one you are interested in: >>> z_at_value(Planck18.angular_diameter_distance, ... 1500 * u.Mpc, zmax=1.5) # doctest: +FLOAT_CMP <Quantity 0.68044452 redshift> >>> z_at_value(Planck18.angular_diameter_distance, ... 1500 * u.Mpc, zmin=2.5) # doctest: +FLOAT_CMP <Quantity 3.7823268 redshift> Alternatively the ``bracket`` option may be used to initialize the function solver on a desired region, but one should be aware that this does not guarantee it will remain close to this starting bracket. For the example of angular diameter distance, which has a maximum near a redshift of 1.6 in this cosmology, defining a bracket on either side of this maximum will often return a solution on the same side: >>> z_at_value(Planck18.angular_diameter_distance, ... 1500 * u.Mpc, bracket=(1.0, 1.2)) # doctest: +FLOAT_CMP +IGNORE_WARNINGS <Quantity 0.68044452 redshift> But this is not ascertained especially if the bracket is chosen too wide and/or too close to the turning point: >>> z_at_value(Planck18.angular_diameter_distance, ... 1500 * u.Mpc, bracket=(0.1, 1.5)) # doctest: +SKIP <Quantity 3.7823268 redshift> # doctest: +SKIP Likewise, even for the same minimizer and same starting conditions different results can be found depending on architecture or library versions: >>> z_at_value(Planck18.angular_diameter_distance, ... 1500 * u.Mpc, bracket=(2.0, 2.5)) # doctest: +SKIP <Quantity 3.7823268 redshift> # doctest: +SKIP >>> z_at_value(Planck18.angular_diameter_distance, ... 1500 * u.Mpc, bracket=(2.0, 2.5)) # doctest: +SKIP <Quantity 0.68044452 redshift> # doctest: +SKIP It is therefore generally safer to use the 3-parameter variant to ensure the solution stays within the bracketing limits: >>> z_at_value(Planck18.angular_diameter_distance, 1500 * u.Mpc, ... bracket=(0.1, 1.0, 1.5)) # doctest: +FLOAT_CMP <Quantity 0.68044452 redshift> Also note that the luminosity distance and distance modulus (two other commonly inverted quantities) are monotonic in flat and open universes, but not in closed universes. All the arguments except ``func``, ``method`` and ``verbose`` accept array inputs. 
    This does NOT use interpolation tables or any method to speed up
    evaluations, rather providing a convenient means to broadcast arguments
    over an element-wise scalar evaluation.

    The most common use case for non-scalar input is to evaluate 'func' for an
    array of ``fval``:

    >>> z_at_value(Planck13.age, [2, 7] * u.Gyr)  # doctest: +FLOAT_CMP
    <Quantity [3.19812061, 0.75620443] redshift>

    ``fval`` can be any shape:

    >>> z_at_value(Planck13.age, [[2, 7], [1, 3]]*u.Gyr)  # doctest: +FLOAT_CMP
    <Quantity [[3.19812061, 0.75620443],
               [5.67661227, 2.19131955]] redshift>

    Other arguments can be arrays. For non-monotonic functions -- for example,
    the angular diameter distance -- this can be useful to find all solutions.

    >>> z_at_value(Planck13.angular_diameter_distance, 1500 * u.Mpc,
    ...            zmin=[0, 2.5], zmax=[2, 4])  # doctest: +FLOAT_CMP
    <Quantity [0.68127747, 3.79149062] redshift>

    The ``bracket`` argument can likewise be an array. However, since bracket
    must already be a sequence (or None), it MUST be given as an object
    `numpy.ndarray`. Importantly, the depth of the array must be such that
    each bracket subsequence is an object. Errors or unexpected results will
    happen otherwise. A convenient means to ensure the right depth is by
    including a length-0 tuple as a bracket and then truncating the object
    array to remove the placeholder. This can be seen in the following
    example:

    >>> bracket=np.array([(1.0, 1.2),(2.0, 2.5), ()], dtype=object)[:-1]
    >>> z_at_value(Planck18.angular_diameter_distance, 1500 * u.Mpc,
    ...            bracket=bracket)  # doctest: +SKIP
    <Quantity [0.68044452, 3.7823268] redshift>
    """
    # `fval` can be a Quantity, which isn't (yet) compatible w/ `numpy.nditer`
    # so we strip it of units for broadcasting and restore the units when
    # passing the elements to `_z_at_scalar_value`.
    fval = np.asanyarray(fval)
    unit = getattr(fval, 'unit', 1)  # can be unitless
    zmin = Quantity(zmin, cu.redshift).value  # must be unitless
    zmax = Quantity(zmax, cu.redshift).value

    # bracket must be an object array (assumed to be correct) or a 'scalar'
    # bracket: 2 or 3 elt sequence
    if not isinstance(bracket, np.ndarray):  # 'scalar' bracket
        if bracket is not None and len(bracket) not in (2, 3):
            raise ValueError("`bracket` is not an array "
                             "nor a 2 (or 3) element sequence.")
        else:  # munge bracket into a 1-elt object array
            bracket = np.array([bracket, ()], dtype=object)[:1].squeeze()
    if bracket.dtype != np.object_:
        raise TypeError(f"`bracket` has dtype {bracket.dtype}, not 'O'")

    # make multi-dimensional iterator for all but `method`, `verbose`
    with np.nditer(
        [fval, zmin, zmax, ztol, maxfun, bracket, None],
        flags=['refs_ok'],
        op_flags=[*[['readonly']] * 6,  # ← inputs  output ↓
                  ['writeonly', 'allocate', 'no_subtype']],
        op_dtypes=(*(None,)*6, fval.dtype),
        casting="no",
    ) as it:
        for fv, zmn, zmx, zt, mfe, bkt, zs in it:  # ← eltwise unpack & eval ↓
            zs[...] = _z_at_scalar_value(func, fv * unit, zmin=zmn, zmax=zmx,
                                         ztol=zt, maxfun=mfe,
                                         bracket=bkt.item(),  # not broadcasted
                                         method=method, verbose=verbose)
        # since bracket is an object array, the output will be too, so it is
        # cast to the same type as the function value.
        result = it.operands[-1]  # zs

    return result << cu.redshift
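# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the astropy source above): the core trick
# used by ``_z_at_scalar_value`` -- inverting a function by minimizing the
# absolute residual |func(z) - fval| with scipy. ``invert_scalar`` and the toy
# ``log1p`` "distance" function are hypothetical stand-ins, not cosmology
# methods.
import numpy as np
from scipy.optimize import minimize_scalar


def invert_scalar(func, fval, zmin=1e-8, zmax=1000.0, ztol=1e-8):
    """Find z in [zmin, zmax] with func(z) ~= fval, assuming func is monotonic."""
    def residual(z):
        # Penalize excursions outside the bounds, mirroring how the real
        # solver forces 'Brent'/'Golden' (which ignore `bounds`) inside.
        if z < zmin or z > zmax:
            return 1e300
        return abs(func(z) - fval)

    return minimize_scalar(residual, method="Brent", options={"xtol": ztol}).x


# log1p is monotonic, so the inverse of fval=1 is z = e - 1.
assert np.isclose(invert_scalar(np.log1p, 1.0), np.e - 1.0, rtol=1e-5)
# ---------------------------------------------------------------------------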
525867e54438833265d9139bcdf0b8f116e98c4c3dd5b961a0edd36d706f174b
# Licensed under a 3-clause BSD style license - see LICENSE.rst import functools from math import inf from numbers import Number import numpy as np from astropy.units import Quantity from astropy.utils import isiterable from astropy.utils.decorators import deprecated from . import units as cu __all__ = [] # nothing is publicly scoped __doctest_skip__ = ["inf_like", "vectorize_if_needed"] def vectorize_redshift_method(func=None, nin=1): """Vectorize a method of redshift(s). Parameters ---------- func : callable or None method to wrap. If `None` returns a :func:`functools.partial` with ``nin`` loaded. nin : int Number of positional redshift arguments. Returns ------- wrapper : callable :func:`functools.wraps` of ``func`` where the first ``nin`` arguments are converted from |Quantity| to :class:`numpy.ndarray`. """ # allow for pie-syntax & setting nin if func is None: return functools.partial(vectorize_redshift_method, nin=nin) @functools.wraps(func) def wrapper(self, *args, **kwargs): """ :func:`functools.wraps` of ``func`` where the first ``nin`` arguments are converted from |Quantity| to `numpy.ndarray` or scalar. """ # process inputs # TODO! quantity-aware vectorization can simplify this. zs = [z if not isinstance(z, Quantity) else z.to_value(cu.redshift) for z in args[:nin]] # scalar inputs if all(isinstance(z, (Number, np.generic)) for z in zs): return func(self, *zs, *args[nin:], **kwargs) # non-scalar. use vectorized func return wrapper.__vectorized__(self, *zs, *args[nin:], **kwargs) wrapper.__vectorized__ = np.vectorize(func) # attach vectorized function # TODO! use frompyfunc when can solve return type errors return wrapper @deprecated( since="5.0", message="vectorize_if_needed has been removed because it constructs a new ufunc on each call", alternative="use a pre-vectorized function instead for a target array 'z'" ) def vectorize_if_needed(f, *x, **vkw): """Helper function to vectorize scalar functions on array inputs. Parameters ---------- f : callable 'f' must accept positional arguments and no mandatory keyword arguments. *x Arguments into ``f``. **vkw Keyword arguments into :class:`numpy.vectorize`. Examples -------- >>> func = lambda x: x ** 2 >>> vectorize_if_needed(func, 2) 4 >>> vectorize_if_needed(func, [2, 3]) array([4, 9]) """ return np.vectorize(f, **vkw)(*x) if any(map(isiterable, x)) else f(*x) @deprecated( since="5.0", message=("inf_like has been removed because it duplicates " "functionality provided by numpy.full_like()"), alternative="Use numpy.full_like(z, numpy.inf) instead for a target array 'z'" ) def inf_like(x): """Return the shape of x with value infinity and dtype='float'. Preserves 'shape' for both array and scalar inputs. But always returns a float array, even if x is of integer type. Parameters ---------- x : scalar or array-like Must work with functions `numpy.isscalar` and `numpy.full_like` (if `x` is not a scalar` Returns ------- `math.inf` or ndarray[float] thereof Returns a scalar `~math.inf` if `x` is a scalar, an array of floats otherwise. Examples -------- >>> inf_like(0.) # float scalar inf >>> inf_like(1) # integer scalar should give float output inf >>> inf_like([0., 1., 2., 3.]) # float list array([inf, inf, inf, inf]) >>> inf_like([0, 1, 2, 3]) # integer list should give float output array([inf, inf, inf, inf]) """ return inf if np.isscalar(x) else np.full_like(x, inf, dtype=float) def aszarr(z): """ Redshift as a `~numbers.Number` or `~numpy.ndarray` / |Quantity| / |Column|. 
    Allows for any ndarray ducktype by checking for attribute "shape".
    """
    if isinstance(z, (Number, np.generic)):  # scalars
        return z
    elif hasattr(z, "shape"):  # ducktypes NumPy array
        if hasattr(z, "unit"):  # Quantity, Column
            return (z << cu.redshift).value  # for speed only use enabled equivs
        return z
    # not one of the preferred types: Number / array ducktype
    return Quantity(z, cu.redshift).value
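# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the astropy source above): what ``aszarr``
# returns for each supported input flavor.
if __name__ == "__main__":  # demo only
    assert aszarr(1.5) == 1.5                          # Number passes through
    assert isinstance(aszarr(np.array([0.0, 1.0])), np.ndarray)  # array ducktype
    z = [0.5, 1.5] * cu.redshift                       # Quantity -> bare values
    assert np.all(aszarr(z) == [0.5, 1.5])
    assert np.all(aszarr([1, 2, 3]) == [1, 2, 3])      # list -> Quantity -> values
# ---------------------------------------------------------------------------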
a7c49aeae2599dec37dec63934bfa472a6b60f992f237907c917bd4609dd3a4e
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Cosmological units and equivalencies.
"""  # (newline needed for unit summary)

import astropy.units as u
from astropy.units.utils import generate_unit_summary as _generate_unit_summary

__all__ = ["littleh", "redshift",
           # redshift equivalencies
           "dimensionless_redshift", "with_redshift",
           "redshift_distance", "redshift_hubble", "redshift_temperature",
           # other equivalencies
           "with_H0"]

__doctest_requires__ = {('with_redshift', 'redshift_distance'): ['scipy']}

_ns = globals()


###############################################################################
# Cosmological Units

# This is not formally a unit, but is used in that way in many contexts, and
# an appropriate equivalency is only possible if it's treated as a unit.
redshift = u.def_unit(['redshift'], prefixes=False, namespace=_ns,
                      doc="Cosmological redshift.", format={'latex': r''})

# This is not formally a unit, but is used in that way in many contexts, and
# an appropriate equivalency is only possible if it's treated as a unit (see
# https://arxiv.org/pdf/1308.4150.pdf for more)
# Also note that h or h100 or h_100 would be a better name, but they either
# conflict or have numbers in them, which is disallowed
littleh = u.def_unit(['littleh'], namespace=_ns, prefixes=False,
                     doc='Reduced/"dimensionless" Hubble constant',
                     format={'latex': r'h_{100}'})


###############################################################################
# Equivalencies


def dimensionless_redshift():
    """Allow redshift to be 1-to-1 equivalent to dimensionless.

    It is special compared to other equivalency pairs in that it
    allows this independent of the power to which the redshift is raised,
    and independent of whether it is part of a more complicated unit.
    It is similar to u.dimensionless_angles() in this respect.
    """
    return u.Equivalency([(redshift, None)], "dimensionless_redshift")


def redshift_distance(cosmology=None, kind="comoving", **atzkw):
    """Convert quantities between redshift and distance.

    Care should be taken to not misinterpret a relativistic, gravitational,
    etc redshift as a cosmological one.

    Parameters
    ----------
    cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional
        A cosmology realization or built-in cosmology's name (e.g. 'Planck18').
        If None, will use the default cosmology
        (controlled by :class:`~astropy.cosmology.default_cosmology`).
    kind : {'comoving', 'lookback', 'luminosity'} or None, optional
        The distance type for the Equivalency.
        Note this does NOT include the angular diameter distance as this
        distance measure is not monotonic.
    **atzkw
        keyword arguments for :func:`~astropy.cosmology.z_at_value`

    Returns
    -------
    `~astropy.units.equivalencies.Equivalency`
        Equivalency between redshift and distance.
Examples -------- >>> import astropy.units as u >>> import astropy.cosmology.units as cu >>> from astropy.cosmology import WMAP9 >>> z = 1100 * cu.redshift >>> z.to(u.Mpc, cu.redshift_distance(WMAP9, kind="comoving")) # doctest: +FLOAT_CMP <Quantity 14004.03157418 Mpc> """ from astropy.cosmology import default_cosmology, z_at_value # get cosmology: None -> default and process str / class cosmology = cosmology if cosmology is not None else default_cosmology.get() with default_cosmology.set(cosmology): # if already cosmo, passes through cosmology = default_cosmology.get() allowed_kinds = ('comoving', 'lookback', 'luminosity') if kind not in allowed_kinds: raise ValueError(f"`kind` is not one of {allowed_kinds}") method = getattr(cosmology, kind + "_distance") def z_to_distance(z): """Redshift to distance.""" return method(z) def distance_to_z(d): """Distance to redshift.""" return z_at_value(method, d << u.Mpc, **atzkw) return u.Equivalency([(redshift, u.Mpc, z_to_distance, distance_to_z)], "redshift_distance", {'cosmology': cosmology, "distance": kind}) def redshift_hubble(cosmology=None, **atzkw): """Convert quantities between redshift and Hubble parameter and little-h. Care should be taken to not misinterpret a relativistic, gravitational, etc redshift as a cosmological one. Parameters ---------- cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional A cosmology realization or built-in cosmology's name (e.g. 'Planck18'). If None, will use the default cosmology (controlled by :class:`~astropy.cosmology.default_cosmology`). **atzkw keyword arguments for :func:`~astropy.cosmology.z_at_value` Returns ------- `~astropy.units.equivalencies.Equivalency` Equivalency between redshift and Hubble parameter and little-h unit. Examples -------- >>> import astropy.units as u >>> import astropy.cosmology.units as cu >>> from astropy.cosmology import WMAP9 >>> z = 1100 * cu.redshift >>> equivalency = cu.redshift_hubble(WMAP9) # construct equivalency >>> z.to(u.km / u.s / u.Mpc, equivalency) # doctest: +FLOAT_CMP <Quantity 1565637.40154275 km / (Mpc s)> >>> z.to(cu.littleh, equivalency) # doctest: +FLOAT_CMP <Quantity 15656.37401543 littleh> """ from astropy.cosmology import default_cosmology, z_at_value # get cosmology: None -> default and process str / class cosmology = cosmology if cosmology is not None else default_cosmology.get() with default_cosmology.set(cosmology): # if already cosmo, passes through cosmology = default_cosmology.get() def z_to_hubble(z): """Redshift to Hubble parameter.""" return cosmology.H(z) def hubble_to_z(H): """Hubble parameter to redshift.""" return z_at_value(cosmology.H, H << (u.km / u.s / u.Mpc), **atzkw) def z_to_littleh(z): """Redshift to :math:`h`-unit Quantity.""" return z_to_hubble(z).to_value(u.km / u.s / u.Mpc) / 100 * littleh def littleh_to_z(h): """:math:`h`-unit Quantity to redshift.""" return hubble_to_z(h * 100) return u.Equivalency([(redshift, u.km / u.s / u.Mpc, z_to_hubble, hubble_to_z), (redshift, littleh, z_to_littleh, littleh_to_z)], "redshift_hubble", {'cosmology': cosmology}) def redshift_temperature(cosmology=None, **atzkw): """Convert quantities between redshift and CMB temperature. Care should be taken to not misinterpret a relativistic, gravitational, etc redshift as a cosmological one. Parameters ---------- cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional A cosmology realization or built-in cosmology's name (e.g. 'Planck18'). 
If None, will use the default cosmology (controlled by :class:`~astropy.cosmology.default_cosmology`). **atzkw keyword arguments for :func:`~astropy.cosmology.z_at_value` Returns ------- `~astropy.units.equivalencies.Equivalency` Equivalency between redshift and temperature. Examples -------- >>> import astropy.units as u >>> import astropy.cosmology.units as cu >>> from astropy.cosmology import WMAP9 >>> z = 1100 * cu.redshift >>> z.to(u.K, cu.redshift_temperature(WMAP9)) <Quantity 3000.225 K> """ from astropy.cosmology import default_cosmology, z_at_value # get cosmology: None -> default and process str / class cosmology = cosmology if cosmology is not None else default_cosmology.get() with default_cosmology.set(cosmology): # if already cosmo, passes through cosmology = default_cosmology.get() def z_to_Tcmb(z): return cosmology.Tcmb(z) def Tcmb_to_z(T): return z_at_value(cosmology.Tcmb, T << u.K, **atzkw) return u.Equivalency([(redshift, u.K, z_to_Tcmb, Tcmb_to_z)], "redshift_temperature", {'cosmology': cosmology}) def with_redshift(cosmology=None, *, distance="comoving", hubble=True, Tcmb=True, atzkw=None): """Convert quantities between measures of cosmological distance. Note: by default all equivalencies are on and must be explicitly turned off. Care should be taken to not misinterpret a relativistic, gravitational, etc redshift as a cosmological one. Parameters ---------- cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional A cosmology realization or built-in cosmology's name (e.g. 'Planck18'). If `None`, will use the default cosmology (controlled by :class:`~astropy.cosmology.default_cosmology`). distance : {'comoving', 'lookback', 'luminosity'} or None (optional, keyword-only) The type of distance equivalency to create or `None`. Default is 'comoving'. hubble : bool (optional, keyword-only) Whether to create a Hubble parameter <-> redshift equivalency, using ``Cosmology.H``. Default is `True`. Tcmb : bool (optional, keyword-only) Whether to create a CMB temperature <-> redshift equivalency, using ``Cosmology.Tcmb``. Default is `True`. atzkw : dict or None (optional, keyword-only) keyword arguments for :func:`~astropy.cosmology.z_at_value` Returns ------- `~astropy.units.equivalencies.Equivalency` With equivalencies between redshift and distance / Hubble / temperature. 
Examples -------- >>> import astropy.units as u >>> import astropy.cosmology.units as cu >>> from astropy.cosmology import WMAP9 >>> equivalency = cu.with_redshift(WMAP9) >>> z = 1100 * cu.redshift Redshift to (comoving) distance: >>> z.to(u.Mpc, equivalency) # doctest: +FLOAT_CMP <Quantity 14004.03157418 Mpc> Redshift to the Hubble parameter: >>> z.to(u.km / u.s / u.Mpc, equivalency) # doctest: +FLOAT_CMP <Quantity 1565637.40154275 km / (Mpc s)> >>> z.to(cu.littleh, equivalency) # doctest: +FLOAT_CMP <Quantity 15656.37401543 littleh> Redshift to CMB temperature: >>> z.to(u.K, equivalency) <Quantity 3000.225 K> """ from astropy.cosmology import default_cosmology # get cosmology: None -> default and process str / class cosmology = cosmology if cosmology is not None else default_cosmology.get() with default_cosmology.set(cosmology): # if already cosmo, passes through cosmology = default_cosmology.get() atzkw = atzkw if atzkw is not None else {} equivs = [] # will append as built # Hubble <-> Redshift if hubble: equivs.extend(redshift_hubble(cosmology, **atzkw)) # CMB Temperature <-> Redshift if Tcmb: equivs.extend(redshift_temperature(cosmology, **atzkw)) # Distance <-> Redshift, but need to choose which distance if distance is not None: equivs.extend(redshift_distance(cosmology, kind=distance, **atzkw)) # ----------- return u.Equivalency(equivs, "with_redshift", {'cosmology': cosmology, 'distance': distance, 'hubble': hubble, 'Tcmb': Tcmb}) # =================================================================== def with_H0(H0=None): """ Convert between quantities with little-h and the equivalent physical units. Parameters ---------- H0 : None or `~astropy.units.Quantity` ['frequency'] The value of the Hubble constant to assume. If a `~astropy.units.Quantity`, will assume the quantity *is* ``H0``. If `None` (default), use the ``H0`` attribute from :mod:`~astropy.cosmology.default_cosmology`. References ---------- For an illuminating discussion on why you may or may not want to use little-h at all, see https://arxiv.org/pdf/1308.4150.pdf """ if H0 is None: from .realizations import default_cosmology H0 = default_cosmology.get().H0 h100_val_unit = u.Unit(100 / (H0.to_value((u.km / u.s) / u.Mpc)) * littleh) return u.Equivalency([(h100_val_unit, None)], "with_H0", kwargs={"H0": H0}) # =================================================================== # Enable the set of default equivalencies. # If the cosmology package is imported, this is added to the list astropy-wide. u.add_enabled_equivalencies(dimensionless_redshift()) # ============================================================================= # DOCSTRING # This generates a docstring for this module that describes all of the # standard units defined here. if __doc__ is not None: __doc__ += _generate_unit_summary(_ns)
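# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the astropy source above): converting a
# little-h scaled distance to physical units with ``with_H0``. WMAP9 has
# H0 = 69.32 km/s/Mpc, i.e. h ~ 0.6932, so 100 Mpc/littleh -> ~144.3 Mpc.
if __name__ == "__main__":  # demo only
    from astropy.cosmology import WMAP9

    distance = 100 * (u.Mpc / littleh)
    print(distance.to(u.Mpc, with_H0(WMAP9.H0)))  # ~144.26 Mpc
# ---------------------------------------------------------------------------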
62e7e54582e9bd41c7370e85a187f928f3d3812795a649830af72a8c7d51e2bc
# Licensed under a 3-clause BSD style license - see LICENSE.rst # STDLIB import pathlib import sys from typing import Optional, Union # LOCAL from astropy.utils.data import get_pkg_data_path from astropy.utils.decorators import deprecated from astropy.utils.state import ScienceState from .core import Cosmology _COSMOLOGY_DATA_DIR = pathlib.Path(get_pkg_data_path("cosmology", "data", package="astropy")) available = tuple(sorted([p.stem for p in _COSMOLOGY_DATA_DIR.glob("*.ecsv")])) __all__ = ["available", "default_cosmology"] + list(available) __doctest_requires__ = {"*": ["scipy"]} def __getattr__(name): """Make specific realizations from data files with lazy import from `PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_. Raises ------ AttributeError If "name" is not in :mod:`astropy.cosmology.realizations` """ if name not in available: raise AttributeError(f"module {__name__!r} has no attribute {name!r}.") cosmo = Cosmology.read(str(_COSMOLOGY_DATA_DIR / name) + ".ecsv", format="ascii.ecsv") cosmo.__doc__ = (f"{name} instance of {cosmo.__class__.__qualname__} " f"cosmology\n(from {cosmo.meta['reference']})") # Cache in this module so `__getattr__` is only called once per `name`. setattr(sys.modules[__name__], name, cosmo) return cosmo def __dir__(): """Directory, including lazily-imported objects.""" return __all__ ######################################################################### # The science state below contains the current cosmology. ######################################################################### class default_cosmology(ScienceState): """The default cosmology to use. To change it:: >>> from astropy.cosmology import default_cosmology, WMAP7 >>> with default_cosmology.set(WMAP7): ... # WMAP7 cosmology in effect ... pass Or, you may use a string:: >>> with default_cosmology.set('WMAP7'): ... # WMAP7 cosmology in effect ... pass To get the default cosmology: >>> default_cosmology.get() FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, ... """ _default_value = "Planck18" _value = "Planck18" @deprecated("5.0", alternative="get") @classmethod def get_cosmology_from_string(cls, arg): """Return a cosmology instance from a string.""" if arg == "no_default": value = None else: value = cls._get_from_registry(arg) return value @classmethod def validate(cls, value: Union[Cosmology, str, None]) -> Optional[Cosmology]: """Return a Cosmology given a value. Parameters ---------- value : None, str, or `~astropy.cosmology.Cosmology` Returns ------- `~astropy.cosmology.Cosmology` instance Raises ------ TypeError If ``value`` is not a string or |Cosmology|. """ # None -> default if value is None: value = cls._default_value # Parse to Cosmology. Error if cannot. if isinstance(value, str): # special-case one string if value == "no_default": value = None else: value = cls._get_from_registry(value) elif not isinstance(value, Cosmology): raise TypeError("default_cosmology must be a string or Cosmology instance, " f"not {value}.") return value @classmethod def _get_from_registry(cls, name: str) -> Cosmology: """Get a registered Cosmology realization. Parameters ---------- name : str The built-in |Cosmology| realization to retrieve. Returns ------- `astropy.cosmology.Cosmology` The cosmology realization of `name`. Raises ------ ValueError If ``name`` is a str, but not for a built-in Cosmology. TypeError If ``name`` is for a non-Cosmology object. """ try: value = getattr(sys.modules[__name__], name) except AttributeError: raise ValueError(f"Unknown cosmology {name!r}. 
" f"Valid cosmologies:\n{available}") if not isinstance(value, Cosmology): raise TypeError(f"cannot find a Cosmology realization called {name}.") return value
4291be6e62d5ffa89c28e4a7827347efc340e29b9ec1de2b1678b9772c652de7
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
``showtable`` is a command-line script based on ``astropy.io`` and
``astropy.table`` for printing ASCII, FITS, HDF5 or VOTable file(s) to the
standard output.

Example usage of ``showtable``:

1. FITS::

    $ showtable astropy/io/fits/tests/data/table.fits
     target V_mag
    ------- -----
    NGC1001  11.1
    NGC1002  12.3
    NGC1003  15.2

2. ASCII::

    $ showtable astropy/io/ascii/tests/t/simple_csv.csv
     a   b   c
    --- --- ---
      1   2   3
      4   5   6

3. XML::

    $ showtable astropy/io/votable/tests/data/names.xml --max-width 70
               col1             col2   col3 ... col15 col16 col17
                ---             deg    deg  ...  mag   mag   ---
    ------------------------- ------- ------ ... ----- ----- -----
    SSTGLMC G000.0000+00.1611  0.0000 0.1611 ...    --    --    AA

4. Print all the FITS tables in the current directory::

    $ showtable *.fits

"""

import argparse
import textwrap
import warnings

from astropy import log
from astropy.table import Table
from astropy.utils.exceptions import AstropyUserWarning


def showtable(filename, args):
    """
    Read a table and print to the standard output.

    Parameters
    ----------
    filename : str
        The path to the file to read.

    """
    if args.info and args.stats:
        warnings.warn('--info and --stats cannot be used together',
                      AstropyUserWarning)
    if (any((args.max_lines, args.max_width, args.hide_unit, args.show_dtype))
            and (args.info or args.stats)):
        warnings.warn('print parameters are ignored if --info or --stats is '
                      'used', AstropyUserWarning)

    # these parameters are passed to Table.read if they are specified in the
    # command-line
    read_kwargs = ('hdu', 'format', 'table_id', 'delimiter')
    kwargs = {k: v for k, v in vars(args).items()
              if k in read_kwargs and v is not None}
    try:
        table = Table.read(filename, **kwargs)
        if args.info:
            table.info('attributes')
        elif args.stats:
            table.info('stats')
        else:
            formatter = table.more if args.more else table.pprint
            formatter(max_lines=args.max_lines, max_width=args.max_width,
                      show_unit=(False if args.hide_unit else None),
                      show_dtype=(True if args.show_dtype else None))
    except IOError as e:
        log.error(str(e))


def main(args=None):
    """The main function called by the `showtable` script."""
    parser = argparse.ArgumentParser(
        description=textwrap.dedent("""
            Print tables from ASCII, FITS, HDF5, VOTable file(s). The tables
            are read with 'astropy.table.Table.read' and are printed with
            'astropy.table.Table.pprint'. The default behavior is to make the
            table output fit onto a single screen page. For a long and wide
            table this will mean cutting out inner rows and columns. To print
            **all** the rows or columns use ``--max-lines=-1`` or
            ``--max-width=-1``, respectively.
The complete list of supported formats can be found at http://astropy.readthedocs.io/en/latest/io/unified.html#built-in-table-readers-writers """)) addarg = parser.add_argument addarg('filename', nargs='+', help='path to one or more files') addarg('--format', help='input table format, should be specified if it ' 'cannot be automatically detected') addarg('--more', action='store_true', help='use the pager mode from Table.more') addarg('--info', action='store_true', help='show information about the table columns') addarg('--stats', action='store_true', help='show statistics about the table columns') # pprint arguments pprint_args = parser.add_argument_group('pprint arguments') addarg = pprint_args.add_argument addarg('--max-lines', type=int, help='maximum number of lines in table output (default=screen ' 'length, -1 for no limit)') addarg('--max-width', type=int, help='maximum width in table output (default=screen width, ' '-1 for no limit)') addarg('--hide-unit', action='store_true', help='hide the header row for unit (which is shown ' 'only if one or more columns has a unit)') addarg('--show-dtype', action='store_true', help='always include a header row for column dtypes ' '(otherwise shown only if any column is multidimensional)') # ASCII-specific arguments ascii_args = parser.add_argument_group('ASCII arguments') addarg = ascii_args.add_argument addarg('--delimiter', help='column delimiter string') # FITS-specific arguments fits_args = parser.add_argument_group('FITS arguments') addarg = fits_args.add_argument addarg('--hdu', help='name of the HDU to show') # HDF5-specific arguments hdf5_args = parser.add_argument_group('HDF5 arguments') addarg = hdf5_args.add_argument addarg('--path', help='the path from which to read the table') # VOTable-specific arguments votable_args = parser.add_argument_group('VOTable arguments') addarg = votable_args.add_argument addarg('--table-id', help='the table to read in') args = parser.parse_args(args) for idx, filename in enumerate(args.filename): if idx > 0: print() showtable(filename, args)
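# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the astropy source above): ``main`` accepts
# an argv-style list, so the CLI can also be driven from Python.
# "example.fits" is a hypothetical path; a missing file is logged as an error
# by ``showtable`` rather than raising.
if __name__ == '__main__':  # demo only
    main(['example.fits', '--info'])
# ---------------------------------------------------------------------------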
dba10680844500c7bfcb252f9df7a0e2009d283f49c59959d2ca736fe8be8ea6
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst from astropy.utils.tests.test_metadata import MetaBaseTest import gc import os import sys import copy from io import StringIO from collections import OrderedDict import pathlib import pickle import pytest import numpy as np from numpy.testing import assert_allclose, assert_array_equal from astropy.io import fits from astropy.table import (Table, QTable, Column, MaskedColumn, TableReplaceWarning, TableAttribute) from astropy.tests.helper import assert_follows_unicode_guidelines from astropy.coordinates import SkyCoord from astropy.utils.data import get_pkg_data_filename from astropy.utils.exceptions import AstropyUserWarning from astropy import table from astropy import units as u from astropy.time import Time, TimeDelta from .conftest import MaskedTable, MIXIN_COLS from astropy.utils.compat.optional_deps import HAS_PANDAS # noqa @pytest.fixture def home_is_tmpdir(monkeypatch, tmpdir): """ Pytest fixture to run a test case with tilde-prefixed paths. In the tilde-path case, environment variables are temporarily modified so that '~' resolves to the temp directory. """ # For Unix monkeypatch.setenv('HOME', str(tmpdir)) # For Windows monkeypatch.setenv('USERPROFILE', str(tmpdir)) class SetupData: def _setup(self, table_types): self._table_type = table_types.Table self._column_type = table_types.Column @property def a(self): if self._column_type is not None: if not hasattr(self, '_a'): self._a = self._column_type( [1, 2, 3], name='a', format='%d', meta={'aa': [0, 1, 2, 3, 4]}) return self._a @property def b(self): if self._column_type is not None: if not hasattr(self, '_b'): self._b = self._column_type( [4, 5, 6], name='b', format='%d', meta={'aa': 1}) return self._b @property def c(self): if self._column_type is not None: if not hasattr(self, '_c'): self._c = self._column_type([7, 8, 9], 'c') return self._c @property def d(self): if self._column_type is not None: if not hasattr(self, '_d'): self._d = self._column_type([7, 8, 7], 'd') return self._d @property def obj(self): if self._column_type is not None: if not hasattr(self, '_obj'): self._obj = self._column_type([1, 'string', 3], 'obj', dtype='O') return self._obj @property def t(self): if self._table_type is not None: if not hasattr(self, '_t'): self._t = self._table_type([self.a, self.b]) return self._t @pytest.mark.usefixtures('table_types') class TestSetTableColumn(SetupData): def test_set_row(self, table_types): """Set a row from a tuple of values""" self._setup(table_types) t = table_types.Table([self.a, self.b]) t[1] = (20, 21) assert t['a'][0] == 1 assert t['a'][1] == 20 assert t['a'][2] == 3 assert t['b'][0] == 4 assert t['b'][1] == 21 assert t['b'][2] == 6 def test_set_row_existing(self, table_types): """Set a row from another existing row""" self._setup(table_types) t = table_types.Table([self.a, self.b]) t[0] = t[1] assert t[0][0] == 2 assert t[0][1] == 5 def test_set_row_fail_1(self, table_types): """Set a row from an incorrectly-sized or typed set of values""" self._setup(table_types) t = table_types.Table([self.a, self.b]) with pytest.raises(ValueError): t[1] = (20, 21, 22) with pytest.raises(ValueError): t[1] = 0 def test_set_row_fail_2(self, table_types): """Set a row from an incorrectly-typed tuple of values""" self._setup(table_types) t = table_types.Table([self.a, self.b]) with pytest.raises(ValueError): t[1] = ('abc', 'def') def test_set_new_col_new_table(self, table_types): """Create a new column in empty table using the item access 
syntax""" self._setup(table_types) t = table_types.Table() t['aa'] = self.a # Test that the new column name is 'aa' and that the values match assert np.all(t['aa'] == self.a) assert t.colnames == ['aa'] def test_set_new_col_new_table_quantity(self, table_types): """Create a new column (from a quantity) in empty table using the item access syntax""" self._setup(table_types) t = table_types.Table() t['aa'] = np.array([1, 2, 3]) * u.m assert np.all(t['aa'] == np.array([1, 2, 3])) assert t['aa'].unit == u.m t['bb'] = 3 * u.m assert np.all(t['bb'] == 3) assert t['bb'].unit == u.m def test_set_new_col_existing_table(self, table_types): """Create a new column in an existing table using the item access syntax""" self._setup(table_types) t = table_types.Table([self.a]) # Add a column t['bb'] = self.b assert np.all(t['bb'] == self.b) assert t.colnames == ['a', 'bb'] assert t['bb'].meta == self.b.meta assert t['bb'].format == self.b.format # Add another column t['c'] = t['a'] assert np.all(t['c'] == t['a']) assert t.colnames == ['a', 'bb', 'c'] assert t['c'].meta == t['a'].meta assert t['c'].format == t['a'].format # Add a multi-dimensional column t['d'] = table_types.Column(np.arange(12).reshape(3, 2, 2)) assert t['d'].shape == (3, 2, 2) assert t['d'][0, 0, 1] == 1 # Add column from a list t['e'] = ['hello', 'the', 'world'] assert np.all(t['e'] == np.array(['hello', 'the', 'world'])) # Make sure setting existing column still works t['e'] = ['world', 'hello', 'the'] assert np.all(t['e'] == np.array(['world', 'hello', 'the'])) # Add a column via broadcasting t['f'] = 10 assert np.all(t['f'] == 10) # Add a column from a Quantity t['g'] = np.array([1, 2, 3]) * u.m assert np.all(t['g'].data == np.array([1, 2, 3])) assert t['g'].unit == u.m # Add a column from a (scalar) Quantity t['g'] = 3 * u.m assert np.all(t['g'].data == 3) assert t['g'].unit == u.m def test_set_new_unmasked_col_existing_table(self, table_types): """Create a new column in an existing table using the item access syntax""" self._setup(table_types) t = table_types.Table([self.a]) # masked or unmasked b = table.Column(name='b', data=[1, 2, 3]) # unmasked t['b'] = b assert np.all(t['b'] == b) def test_set_new_masked_col_existing_table(self, table_types): """Create a new column in an existing table using the item access syntax""" self._setup(table_types) t = table_types.Table([self.a]) # masked or unmasked b = table.MaskedColumn(name='b', data=[1, 2, 3]) # masked t['b'] = b assert np.all(t['b'] == b) def test_set_new_col_existing_table_fail(self, table_types): """Generate failure when creating a new column using the item access syntax""" self._setup(table_types) t = table_types.Table([self.a]) # Wrong size with pytest.raises(ValueError): t['b'] = [1, 2] @pytest.mark.usefixtures('table_types') class TestEmptyData(): def test_1(self, table_types): t = table_types.Table() t.add_column(table_types.Column(name='a', dtype=int, length=100)) assert len(t['a']) == 100 def test_2(self, table_types): t = table_types.Table() t.add_column(table_types.Column(name='a', dtype=int, shape=(3, ), length=100)) assert len(t['a']) == 100 def test_3(self, table_types): t = table_types.Table() # length is not given t.add_column(table_types.Column(name='a', dtype=int)) assert len(t['a']) == 0 def test_4(self, table_types): t = table_types.Table() # length is not given t.add_column(table_types.Column(name='a', dtype=int, shape=(3, 4))) assert len(t['a']) == 0 def test_5(self, table_types): t = table_types.Table() t.add_column(table_types.Column(name='a')) # dtype is 
not specified assert len(t['a']) == 0 def test_scalar(self, table_types): """Test related to #3811 where setting empty tables to scalar values should raise an error instead of having an error raised when accessing the table.""" t = table_types.Table() with pytest.raises(TypeError, match='Empty table cannot have column set to scalar value'): t.add_column(0) def test_add_via_setitem_and_slice(self, table_types): """Test related to #3023 where a MaskedColumn is created with name=None and then gets changed to name='a'. After PR #2790 this test fails without the #3023 fix.""" t = table_types.Table() t['a'] = table_types.Column([1, 2, 3]) t2 = t[:] assert t2.colnames == t.colnames @pytest.mark.usefixtures('table_types') class TestNewFromColumns(): def test_simple(self, table_types): cols = [table_types.Column(name='a', data=[1, 2, 3]), table_types.Column(name='b', data=[4, 5, 6], dtype=np.float32)] t = table_types.Table(cols) assert np.all(t['a'].data == np.array([1, 2, 3])) assert np.all(t['b'].data == np.array([4, 5, 6], dtype=np.float32)) assert type(t['b'][1]) is np.float32 def test_from_np_array(self, table_types): cols = [table_types.Column(name='a', data=np.array([1, 2, 3], dtype=np.int64), dtype=np.float64), table_types.Column(name='b', data=np.array([4, 5, 6], dtype=np.float32))] t = table_types.Table(cols) assert np.all(t['a'] == np.array([1, 2, 3], dtype=np.float64)) assert np.all(t['b'] == np.array([4, 5, 6], dtype=np.float32)) assert type(t['a'][1]) is np.float64 assert type(t['b'][1]) is np.float32 def test_size_mismatch(self, table_types): cols = [table_types.Column(name='a', data=[1, 2, 3]), table_types.Column(name='b', data=[4, 5, 6, 7])] with pytest.raises(ValueError): table_types.Table(cols) def test_name_none(self, table_types): """Column with name=None can init a table whether or not names are supplied""" c = table_types.Column(data=[1, 2], name='c') d = table_types.Column(data=[3, 4]) t = table_types.Table([c, d], names=(None, 'd')) assert t.colnames == ['c', 'd'] t = table_types.Table([c, d]) assert t.colnames == ['c', 'col1'] @pytest.mark.usefixtures('table_types') class TestReverse(): def test_reverse(self, table_types): t = table_types.Table([[1, 2, 3], ['a', 'b', 'cc']]) t.reverse() assert np.all(t['col0'] == np.array([3, 2, 1])) assert np.all(t['col1'] == np.array(['cc', 'b', 'a'])) t2 = table_types.Table(t, copy=False) assert np.all(t2['col0'] == np.array([3, 2, 1])) assert np.all(t2['col1'] == np.array(['cc', 'b', 'a'])) t2 = table_types.Table(t, copy=True) assert np.all(t2['col0'] == np.array([3, 2, 1])) assert np.all(t2['col1'] == np.array(['cc', 'b', 'a'])) t2.sort('col0') assert np.all(t2['col0'] == np.array([1, 2, 3])) assert np.all(t2['col1'] == np.array(['a', 'b', 'cc'])) def test_reverse_big(self, table_types): x = np.arange(10000) y = x + 1 t = table_types.Table([x, y], names=('x', 'y')) t.reverse() assert np.all(t['x'] == x[::-1]) assert np.all(t['y'] == y[::-1]) def test_reverse_mixin(self): """Test reverse for a mixin with no item assignment, fix for #9836""" sc = SkyCoord([1, 2], [3, 4], unit='deg') t = Table([[2, 1], sc], names=['a', 'sc']) t.reverse() assert np.all(t['a'] == [1, 2]) assert np.allclose(t['sc'].ra.to_value('deg'), [2, 1]) @pytest.mark.usefixtures('table_types') class TestRound(): def test_round_int(self, table_types): t = table_types.Table([['a', 'b', 'c'], [1.11, 2.3, 3.0], [1.123456, 2.9876, 3.901]]) t.round() assert np.all(t['col0'] == ['a', 'b', 'c']) assert np.all(t['col1'] == [1., 2., 3.]) assert np.all(t['col2'] == [1., 3., 4.]) 
def test_round_dict(self, table_types): t = table_types.Table([['a', 'b', 'c'], [1.5, 2.5, 3.0111], [1.123456, 2.9876, 3.901]]) t.round({'col1': 0, 'col2': 3}) assert np.all(t['col0'] == ['a', 'b', 'c']) assert np.all(t['col1'] == [2.0, 2.0, 3.0]) assert np.all(t['col2'] == [1.123, 2.988, 3.901]) def test_round_invalid(self, table_types): t = table_types.Table([[1, 2, 3]]) with pytest.raises(ValueError, match="'decimals' argument must be an int or a dict"): t.round(0.5) def test_round_kind(self, table_types): for typecode in 'bBhHiIlLqQpPefdgFDG': # AllInteger, AllFloat arr = np.array([4, 16], dtype=typecode) t = Table([arr]) col0 = t['col0'] t.round(decimals=-1) # Round to nearest 10 assert np.all(t['col0'] == [0, 20]) assert t['col0'] is col0 @pytest.mark.usefixtures('table_types') class TestColumnAccess(): def test_1(self, table_types): t = table_types.Table() with pytest.raises(KeyError): t['a'] def test_2(self, table_types): t = table_types.Table() t.add_column(table_types.Column(name='a', data=[1, 2, 3])) assert np.all(t['a'] == np.array([1, 2, 3])) with pytest.raises(KeyError): t['b'] # column does not exist def test_itercols(self, table_types): names = ['a', 'b', 'c'] t = table_types.Table([[1], [2], [3]], names=names) for name, col in zip(names, t.itercols()): assert name == col.name assert isinstance(col, table_types.Column) @pytest.mark.usefixtures('table_types') class TestAddLength(SetupData): def test_right_length(self, table_types): self._setup(table_types) t = table_types.Table([self.a]) t.add_column(self.b) def test_too_long(self, table_types): self._setup(table_types) t = table_types.Table([self.a]) with pytest.raises(ValueError): t.add_column(table_types.Column(name='b', data=[4, 5, 6, 7])) # data too long def test_too_short(self, table_types): self._setup(table_types) t = table_types.Table([self.a]) with pytest.raises(ValueError): t.add_column(table_types.Column(name='b', data=[4, 5])) # data too short @pytest.mark.usefixtures('table_types') class TestAddPosition(SetupData): def test_1(self, table_types): self._setup(table_types) t = table_types.Table() t.add_column(self.a, 0) def test_2(self, table_types): self._setup(table_types) t = table_types.Table() t.add_column(self.a, 1) def test_3(self, table_types): self._setup(table_types) t = table_types.Table() t.add_column(self.a, -1) def test_5(self, table_types): self._setup(table_types) t = table_types.Table() with pytest.raises(ValueError): t.index_column('b') def test_6(self, table_types): self._setup(table_types) t = table_types.Table() t.add_column(self.a) t.add_column(self.b) assert t.colnames == ['a', 'b'] def test_7(self, table_types): self._setup(table_types) t = table_types.Table([self.a]) t.add_column(self.b, t.index_column('a')) assert t.colnames == ['b', 'a'] def test_8(self, table_types): self._setup(table_types) t = table_types.Table([self.a]) t.add_column(self.b, t.index_column('a') + 1) assert t.colnames == ['a', 'b'] def test_9(self, table_types): self._setup(table_types) t = table_types.Table() t.add_column(self.a) t.add_column(self.b, t.index_column('a') + 1) t.add_column(self.c, t.index_column('b')) assert t.colnames == ['a', 'c', 'b'] def test_10(self, table_types): self._setup(table_types) t = table_types.Table() t.add_column(self.a) ia = t.index_column('a') t.add_column(self.b, ia + 1) t.add_column(self.c, ia) assert t.colnames == ['c', 'a', 'b'] @pytest.mark.usefixtures('table_types') class TestAddName(SetupData): def test_override_name(self, table_types): self._setup(table_types) t = 


@pytest.mark.usefixtures('table_types')
class TestAddName(SetupData):

    def test_override_name(self, table_types):
        self._setup(table_types)
        t = table_types.Table()

        # Check that we can override the name of the input column in the Table
        t.add_column(self.a, name='b')
        t.add_column(self.b, name='a')
        assert t.colnames == ['b', 'a']
        # Check that we did not change the name of the input column
        assert self.a.info.name == 'a'
        assert self.b.info.name == 'b'

        # Now test with an input column from another table
        t2 = table_types.Table()
        t2.add_column(t['a'], name='c')
        assert t2.colnames == ['c']
        # Check that we did not change the name of the input column
        assert t.colnames == ['b', 'a']

        # Check that we can give a name if none was present
        col = table_types.Column([1, 2, 3])
        t.add_column(col, name='c')
        assert t.colnames == ['b', 'a', 'c']

    def test_default_name(self, table_types):
        t = table_types.Table()
        col = table_types.Column([1, 2, 3])
        t.add_column(col)
        assert t.colnames == ['col0']


@pytest.mark.usefixtures('table_types')
class TestInitFromTable(SetupData):

    def test_from_table_cols(self, table_types):
        """Ensure that using cols from an existing table gives
        a clean copy.
        """
        self._setup(table_types)
        t = self.t
        cols = t.columns
        # Construct Table with cols via Table._new_from_cols
        t2a = table_types.Table([cols['a'], cols['b'], self.c])

        # Construct with add_column
        t2b = table_types.Table()
        t2b.add_column(cols['a'])
        t2b.add_column(cols['b'])
        t2b.add_column(self.c)

        t['a'][1] = 20
        t['b'][1] = 21
        for t2 in [t2a, t2b]:
            t2['a'][2] = 10
            t2['b'][2] = 11
            t2['c'][2] = 12
            t2.columns['a'].meta['aa'][3] = 10
            assert np.all(t['a'] == np.array([1, 20, 3]))
            assert np.all(t['b'] == np.array([4, 21, 6]))
            assert np.all(t2['a'] == np.array([1, 2, 10]))
            assert np.all(t2['b'] == np.array([4, 5, 11]))
            assert np.all(t2['c'] == np.array([7, 8, 12]))
            assert t2['a'].name == 'a'
            assert t2.columns['a'].meta['aa'][3] == 10
            assert t.columns['a'].meta['aa'][3] == 3


@pytest.mark.usefixtures('table_types')
class TestAddColumns(SetupData):

    def test_add_columns1(self, table_types):
        self._setup(table_types)
        t = table_types.Table()
        t.add_columns([self.a, self.b, self.c])
        assert t.colnames == ['a', 'b', 'c']

    def test_add_columns2(self, table_types):
        self._setup(table_types)
        t = table_types.Table([self.a, self.b])
        t.add_columns([self.c, self.d])
        assert t.colnames == ['a', 'b', 'c', 'd']
        assert np.all(t['c'] == np.array([7, 8, 9]))

    def test_add_columns3(self, table_types):
        self._setup(table_types)
        t = table_types.Table([self.a, self.b])
        t.add_columns([self.c, self.d], indexes=[1, 0])
        assert t.colnames == ['d', 'a', 'c', 'b']

    def test_add_columns4(self, table_types):
        self._setup(table_types)
        t = table_types.Table([self.a, self.b])
        t.add_columns([self.c, self.d], indexes=[0, 0])
        assert t.colnames == ['c', 'd', 'a', 'b']

    def test_add_columns5(self, table_types):
        self._setup(table_types)
        t = table_types.Table([self.a, self.b])
        t.add_columns([self.c, self.d], indexes=[2, 2])
        assert t.colnames == ['a', 'b', 'c', 'd']

    def test_add_columns6(self, table_types):
        """Check that we can override column names."""
        self._setup(table_types)
        t = table_types.Table()
        t.add_columns([self.a, self.b, self.c], names=['b', 'c', 'a'])
        assert t.colnames == ['b', 'c', 'a']

    def test_add_columns7(self, table_types):
        """Check that default names are used when appropriate."""
        t = table_types.Table()
        col0 = table_types.Column([1, 2, 3])
        col1 = table_types.Column([4, 5, 3])
        t.add_columns([col0, col1])
        assert t.colnames == ['col0', 'col1']

    def test_add_duplicate_column(self, table_types):
        self._setup(table_types)
        t = table_types.Table()
        t.add_column(self.a)
        with pytest.raises(ValueError):
            t.add_column(table_types.Column(name='a', data=[0, 1, 2]))
        t.add_column(table_types.Column(name='a', data=[0, 1, 2]),
                     rename_duplicate=True)
        t.add_column(self.b)
        t.add_column(self.c)
        assert t.colnames == ['a', 'a_1', 'b', 'c']
        t.add_column(table_types.Column(name='a', data=[0, 1, 2]),
                     rename_duplicate=True)
        assert t.colnames == ['a', 'a_1', 'b', 'c', 'a_2']

        # test adding column from a separate Table
        t1 = table_types.Table()
        t1.add_column(self.a)
        with pytest.raises(ValueError):
            t.add_column(t1['a'])
        t.add_column(t1['a'], rename_duplicate=True)

        t1['a'][0] = 100  # Change original column
        assert t.colnames == ['a', 'a_1', 'b', 'c', 'a_2', 'a_3']
        assert t1.colnames == ['a']

        # Check new column didn't change (since name conflict forced a copy)
        assert t['a_3'][0] == self.a[0]

        # Check that rename_duplicate=True is ok if there are no duplicates
        t.add_column(table_types.Column(name='q', data=[0, 1, 2]),
                     rename_duplicate=True)
        assert t.colnames == ['a', 'a_1', 'b', 'c', 'a_2', 'a_3', 'q']

    def test_add_duplicate_columns(self, table_types):
        self._setup(table_types)
        t = table_types.Table([self.a, self.b, self.c])
        with pytest.raises(ValueError):
            t.add_columns([table_types.Column(name='a', data=[0, 1, 2]),
                           table_types.Column(name='b', data=[0, 1, 2])])
        t.add_columns([table_types.Column(name='a', data=[0, 1, 2]),
                       table_types.Column(name='b', data=[0, 1, 2])],
                      rename_duplicate=True)
        t.add_column(self.d)
        assert t.colnames == ['a', 'b', 'c', 'a_1', 'b_1', 'd']
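

# A minimal sketch of the ``rename_duplicate`` behavior tested above: adding a
# column whose name already exists raises ValueError unless
# ``rename_duplicate=True``, which appends a ``_1``, ``_2``, ... suffix.
# Names are illustrative only.
def _sketch_rename_duplicate():
    from astropy.table import Column, Table
    t = Table([Column(name='a', data=[1, 2, 3])])
    t.add_column(Column(name='a', data=[4, 5, 6]), rename_duplicate=True)
    assert t.colnames == ['a', 'a_1']
    return t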


@pytest.mark.usefixtures('table_types')
class TestAddRow(SetupData):

    @property
    def b(self):
        if self._column_type is not None:
            if not hasattr(self, '_b'):
                self._b = self._column_type(name='b', data=[4.0, 5.1, 6.2])
            return self._b

    @property
    def c(self):
        if self._column_type is not None:
            if not hasattr(self, '_c'):
                self._c = self._column_type(name='c', data=['7', '8', '9'])
            return self._c

    @property
    def d(self):
        if self._column_type is not None:
            if not hasattr(self, '_d'):
                self._d = self._column_type(name='d', data=[[1, 2], [3, 4], [5, 6]])
            return self._d

    @property
    def t(self):
        if self._table_type is not None:
            if not hasattr(self, '_t'):
                self._t = self._table_type([self.a, self.b, self.c])
            return self._t

    def test_add_none_to_empty_table(self, table_types):
        self._setup(table_types)
        t = table_types.Table(names=('a', 'b', 'c'), dtype=('(2,)i', 'S4', 'O'))
        t.add_row()
        assert np.all(t['a'][0] == [0, 0])
        assert t['b'][0] == ''
        assert t['c'][0] == 0
        t.add_row()
        assert np.all(t['a'][1] == [0, 0])
        assert t['b'][1] == ''
        assert t['c'][1] == 0

    def test_add_stuff_to_empty_table(self, table_types):
        self._setup(table_types)
        t = table_types.Table(names=('a', 'b', 'obj'), dtype=('(2,)i', 'S8', 'O'))
        t.add_row([[1, 2], 'hello', 'world'])
        assert np.all(t['a'][0] == [1, 2])
        assert t['b'][0] == 'hello'
        assert t['obj'][0] == 'world'
        # Make sure it is not repeating last row but instead
        # adding zeros (as documented)
        t.add_row()
        assert np.all(t['a'][1] == [0, 0])
        assert t['b'][1] == ''
        assert t['obj'][1] == 0

    def test_add_table_row(self, table_types):
        self._setup(table_types)
        t = self.t
        t['d'] = self.d
        t2 = table_types.Table([self.a, self.b, self.c, self.d])
        t.add_row(t2[0])
        assert len(t) == 4
        assert np.all(t['a'] == np.array([1, 2, 3, 1]))
        assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 4.0]))
        assert np.all(t['c'] == np.array(['7', '8', '9', '7']))
        assert np.all(t['d'] == np.array([[1, 2], [3, 4], [5, 6], [1, 2]]))

    def test_add_table_row_obj(self, table_types):
        self._setup(table_types)
        t = table_types.Table([self.a, self.b, self.obj])
        t.add_row([1, 4.0, [10]])
        assert len(t) == 4
        assert np.all(t['a'] == np.array([1, 2, 3, 1]))
        assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 4.0]))
        assert np.all(t['obj'] == np.array([1, 'string', 3, [10]], dtype='O'))

    def test_add_qtable_row_multidimensional(self):
        q = [[1, 2], [3, 4]] * u.m
        qt = table.QTable([q])
        qt.add_row(([5, 6] * u.km,))
        assert np.all(qt['col0'] == [[1, 2], [3, 4], [5000, 6000]] * u.m)

    def test_add_with_tuple(self, table_types):
        self._setup(table_types)
        t = self.t
        t.add_row((4, 7.2, '1'))
        assert len(t) == 4
        assert np.all(t['a'] == np.array([1, 2, 3, 4]))
        assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2]))
        assert np.all(t['c'] == np.array(['7', '8', '9', '1']))

    def test_add_with_list(self, table_types):
        self._setup(table_types)
        t = self.t
        t.add_row([4, 7.2, '10'])
        assert len(t) == 4
        assert np.all(t['a'] == np.array([1, 2, 3, 4]))
        assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2]))
        assert np.all(t['c'] == np.array(['7', '8', '9', '10']))

    def test_add_with_dict(self, table_types):
        self._setup(table_types)
        t = self.t
        t.add_row({'a': 4, 'b': 7.2})
        assert len(t) == 4
        assert np.all(t['a'] == np.array([1, 2, 3, 4]))
        assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2]))
        if t.masked:
            assert np.all(t['c'] == np.array(['7', '8', '9', '7']))
        else:
            assert np.all(t['c'] == np.array(['7', '8', '9', '']))

    def test_add_with_none(self, table_types):
        self._setup(table_types)
        t = self.t
        t.add_row()
        assert len(t) == 4
        assert np.all(t['a'].data == np.array([1, 2, 3, 0]))
        assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 0.0]))
        assert np.all(t['c'].data == np.array(['7', '8', '9', '']))

    def test_add_missing_column(self, table_types):
        self._setup(table_types)
        t = self.t
        with pytest.raises(ValueError):
            t.add_row({'bad_column': 1})

    def test_wrong_size_tuple(self, table_types):
        self._setup(table_types)
        t = self.t
        with pytest.raises(ValueError):
            t.add_row((1, 2))

    def test_wrong_vals_type(self, table_types):
        self._setup(table_types)
        t = self.t
        with pytest.raises(TypeError):
            t.add_row(1)

    def test_add_row_failures(self, table_types):
        self._setup(table_types)
        t = self.t
        t_copy = table_types.Table(t, copy=True)
        # Wrong number of columns
        try:
            t.add_row([1, 2, 3, 4])
        except ValueError:
            pass
        assert len(t) == 3
        assert np.all(t.as_array() == t_copy.as_array())
        # Wrong data type
        try:
            t.add_row(['one', 2, 3])
        except ValueError:
            pass
        assert len(t) == 3
        assert np.all(t.as_array() == t_copy.as_array())
""" self._setup(table_types) row = (10, 40.0, 'x', [10, 20]) for index in range(-3, 4): indices = np.insert(np.arange(3), index, 3) t = table_types.Table([self.a, self.b, self.c, self.d]) t2 = t.copy() t.add_row(row) # By now we know this works t2.insert_row(index, row) for name in t.colnames: if t[name].dtype.kind == 'f': assert np.allclose(t[name][indices], t2[name]) else: assert np.all(t[name][indices] == t2[name]) for index in (-4, 4): t = table_types.Table([self.a, self.b, self.c, self.d]) with pytest.raises(IndexError): t.insert_row(index, row) @pytest.mark.usefixtures('table_types') class TestTableColumn(SetupData): def test_column_view(self, table_types): self._setup(table_types) t = self.t a = t.columns['a'] a[2] = 10 assert t['a'][2] == 10 @pytest.mark.usefixtures('table_types') class TestArrayColumns(SetupData): def test_1d(self, table_types): self._setup(table_types) b = table_types.Column(name='b', dtype=int, shape=(2, ), length=3) t = table_types.Table([self.a]) t.add_column(b) assert t['b'].shape == (3, 2) assert t['b'][0].shape == (2, ) def test_2d(self, table_types): self._setup(table_types) b = table_types.Column(name='b', dtype=int, shape=(2, 4), length=3) t = table_types.Table([self.a]) t.add_column(b) assert t['b'].shape == (3, 2, 4) assert t['b'][0].shape == (2, 4) def test_3d(self, table_types): self._setup(table_types) t = table_types.Table([self.a]) b = table_types.Column(name='b', dtype=int, shape=(2, 4, 6), length=3) t.add_column(b) assert t['b'].shape == (3, 2, 4, 6) assert t['b'][0].shape == (2, 4, 6) @pytest.mark.usefixtures('table_types') class TestRemove(SetupData): @property def t(self): if self._table_type is not None: if not hasattr(self, '_t'): self._t = self._table_type([self.a]) return self._t @property def t2(self): if self._table_type is not None: if not hasattr(self, '_t2'): self._t2 = self._table_type([self.a, self.b, self.c]) return self._t2 def test_1(self, table_types): self._setup(table_types) self.t.remove_columns('a') assert self.t.colnames == [] assert self.t.as_array().size == 0 # Regression test for gh-8640 assert not self.t assert isinstance(self.t == None, np.ndarray) # noqa assert (self.t == None).size == 0 # noqa def test_2(self, table_types): self._setup(table_types) self.t.add_column(self.b) self.t.remove_columns('a') assert self.t.colnames == ['b'] assert self.t.dtype.names == ('b',) assert np.all(self.t['b'] == np.array([4, 5, 6])) def test_3(self, table_types): """Check remove_columns works for a single column with a name of more than one character. 


@pytest.mark.usefixtures('table_types')
class TestRemove(SetupData):

    @property
    def t(self):
        if self._table_type is not None:
            if not hasattr(self, '_t'):
                self._t = self._table_type([self.a])
            return self._t

    @property
    def t2(self):
        if self._table_type is not None:
            if not hasattr(self, '_t2'):
                self._t2 = self._table_type([self.a, self.b, self.c])
            return self._t2

    def test_1(self, table_types):
        self._setup(table_types)
        self.t.remove_columns('a')
        assert self.t.colnames == []
        assert self.t.as_array().size == 0
        # Regression test for gh-8640
        assert not self.t
        assert isinstance(self.t == None, np.ndarray)  # noqa
        assert (self.t == None).size == 0  # noqa

    def test_2(self, table_types):
        self._setup(table_types)
        self.t.add_column(self.b)
        self.t.remove_columns('a')
        assert self.t.colnames == ['b']
        assert self.t.dtype.names == ('b',)
        assert np.all(self.t['b'] == np.array([4, 5, 6]))

    def test_3(self, table_types):
        """Check remove_columns works for a single column with a name of
        more than one character.  Regression test against #2699"""
        self._setup(table_types)
        self.t['new_column'] = self.t['a']
        assert 'new_column' in self.t.columns.keys()
        self.t.remove_columns('new_column')
        assert 'new_column' not in self.t.columns.keys()

    def test_remove_nonexistent_row(self, table_types):
        self._setup(table_types)
        with pytest.raises(IndexError):
            self.t.remove_row(4)

    def test_remove_row_0(self, table_types):
        self._setup(table_types)
        self.t.add_column(self.b)
        self.t.add_column(self.c)
        self.t.remove_row(0)
        assert self.t.colnames == ['a', 'b', 'c']
        assert np.all(self.t['b'] == np.array([5, 6]))

    def test_remove_row_1(self, table_types):
        self._setup(table_types)
        self.t.add_column(self.b)
        self.t.add_column(self.c)
        self.t.remove_row(1)
        assert self.t.colnames == ['a', 'b', 'c']
        assert np.all(self.t['a'] == np.array([1, 3]))

    def test_remove_row_2(self, table_types):
        self._setup(table_types)
        self.t.add_column(self.b)
        self.t.add_column(self.c)
        self.t.remove_row(2)
        assert self.t.colnames == ['a', 'b', 'c']
        assert np.all(self.t['c'] == np.array([7, 8]))

    def test_remove_row_slice(self, table_types):
        self._setup(table_types)
        self.t.add_column(self.b)
        self.t.add_column(self.c)
        self.t.remove_rows(slice(0, 2, 1))
        assert self.t.colnames == ['a', 'b', 'c']
        assert np.all(self.t['c'] == np.array([9]))

    def test_remove_row_list(self, table_types):
        self._setup(table_types)
        self.t.add_column(self.b)
        self.t.add_column(self.c)
        self.t.remove_rows([0, 2])
        assert self.t.colnames == ['a', 'b', 'c']
        assert np.all(self.t['c'] == np.array([8]))

    def test_remove_row_preserves_meta(self, table_types):
        self._setup(table_types)
        self.t.add_column(self.b)
        self.t.remove_rows([0, 2])
        assert self.t['a'].meta == {'aa': [0, 1, 2, 3, 4]}
        assert self.t.dtype == np.dtype([('a', 'int'), ('b', 'int')])

    def test_delitem_row(self, table_types):
        self._setup(table_types)
        self.t.add_column(self.b)
        self.t.add_column(self.c)
        del self.t[1]
        assert self.t.colnames == ['a', 'b', 'c']
        assert np.all(self.t['a'] == np.array([1, 3]))

    @pytest.mark.parametrize("idx", [[0, 2], np.array([0, 2])])
    def test_delitem_row_list(self, table_types, idx):
        self._setup(table_types)
        self.t.add_column(self.b)
        self.t.add_column(self.c)
        del self.t[idx]
        assert self.t.colnames == ['a', 'b', 'c']
        assert np.all(self.t['c'] == np.array([8]))

    def test_delitem_row_slice(self, table_types):
        self._setup(table_types)
        self.t.add_column(self.b)
        self.t.add_column(self.c)
        del self.t[0:2]
        assert self.t.colnames == ['a', 'b', 'c']
        assert np.all(self.t['c'] == np.array([9]))

    def test_delitem_row_fail(self, table_types):
        self._setup(table_types)
        with pytest.raises(IndexError):
            del self.t[4]

    def test_delitem_row_float(self, table_types):
        self._setup(table_types)
        with pytest.raises(IndexError):
            del self.t[1.]

    def test_delitem1(self, table_types):
        self._setup(table_types)
        del self.t['a']
        assert self.t.colnames == []
        assert self.t.as_array().size == 0
        # Regression test for gh-8640
        assert not self.t
        assert isinstance(self.t == None, np.ndarray)  # noqa
        assert (self.t == None).size == 0  # noqa

    def test_delitem2(self, table_types):
        self._setup(table_types)
        del self.t2['b']
        assert self.t2.colnames == ['a', 'c']

    def test_delitems(self, table_types):
        self._setup(table_types)
        del self.t2['a', 'b']
        assert self.t2.colnames == ['c']

    def test_delitem_fail(self, table_types):
        self._setup(table_types)
        with pytest.raises(KeyError):
            del self.t['d']


@pytest.mark.usefixtures('table_types')
class TestKeep(SetupData):

    def test_1(self, table_types):
        self._setup(table_types)
        t = table_types.Table([self.a, self.b])
        t.keep_columns([])
        assert t.colnames == []
        assert t.as_array().size == 0
        # Regression test for gh-8640
        assert not t
        assert isinstance(t == None, np.ndarray)  # noqa
        assert (t == None).size == 0  # noqa

    def test_2(self, table_types):
        self._setup(table_types)
        t = table_types.Table([self.a, self.b])
        t.keep_columns('b')
        assert t.colnames == ['b']
        assert t.dtype.names == ('b',)
        assert np.all(t['b'] == np.array([4, 5, 6]))


@pytest.mark.usefixtures('table_types')
class TestRename(SetupData):

    def test_1(self, table_types):
        self._setup(table_types)
        t = table_types.Table([self.a])
        t.rename_column('a', 'b')
        assert t.colnames == ['b']
        assert t.dtype.names == ('b',)
        assert np.all(t['b'] == np.array([1, 2, 3]))

    def test_2(self, table_types):
        self._setup(table_types)
        t = table_types.Table([self.a, self.b])
        t.rename_column('a', 'c')
        t.rename_column('b', 'a')
        assert t.colnames == ['c', 'a']
        assert t.dtype.names == ('c', 'a')
        if t.masked:
            assert t.mask.dtype.names == ('c', 'a')
        assert np.all(t['c'] == np.array([1, 2, 3]))
        assert np.all(t['a'] == np.array([4, 5, 6]))

    def test_rename_by_attr(self, table_types):
        self._setup(table_types)
        t = table_types.Table([self.a, self.b])
        t['a'].name = 'c'
        t['b'].name = 'a'
        assert t.colnames == ['c', 'a']
        assert t.dtype.names == ('c', 'a')
        assert np.all(t['c'] == np.array([1, 2, 3]))
        assert np.all(t['a'] == np.array([4, 5, 6]))

    def test_rename_columns(self, table_types):
        self._setup(table_types)
        t = table_types.Table([self.a, self.b, self.c])
        t.rename_columns(('a', 'b', 'c'), ('aa', 'bb', 'cc'))
        assert t.colnames == ['aa', 'bb', 'cc']
        t.rename_columns(['bb', 'cc'], ['b', 'c'])
        assert t.colnames == ['aa', 'b', 'c']
        with pytest.raises(TypeError):
            t.rename_columns(('aa'), ['a'])
        with pytest.raises(ValueError):
            t.rename_columns(['a'], ['b', 'c'])
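

# A minimal sketch of the renaming APIs exercised by TestRename above:
# ``rename_column`` for one column, ``rename_columns`` for several at once,
# or assigning to ``t['x'].name``.  All three leave the data untouched.
def _sketch_rename():
    from astropy.table import Table
    t = Table([[1, 2], [3, 4]], names=['a', 'b'])
    t.rename_column('a', 'x')
    t.rename_columns(['x', 'b'], ['u', 'v'])
    t['u'].name = 'w'
    assert t.colnames == ['w', 'v']
    return t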


@pytest.mark.usefixtures('table_types')
class TestSort():

    def test_single(self, table_types):
        t = table_types.Table()
        t.add_column(table_types.Column(name='a', data=[2, 1, 3]))
        t.add_column(table_types.Column(name='b', data=[6, 5, 4]))
        t.add_column(table_types.Column(name='c', data=[(1, 2), (3, 4), (4, 5)]))
        assert np.all(t['a'] == np.array([2, 1, 3]))
        assert np.all(t['b'] == np.array([6, 5, 4]))
        t.sort('a')
        assert np.all(t['a'] == np.array([1, 2, 3]))
        assert np.all(t['b'] == np.array([5, 6, 4]))
        assert np.all(t['c'] == np.array([[3, 4],
                                          [1, 2],
                                          [4, 5]]))
        t.sort('b')
        assert np.all(t['a'] == np.array([3, 1, 2]))
        assert np.all(t['b'] == np.array([4, 5, 6]))
        assert np.all(t['c'] == np.array([[4, 5],
                                          [3, 4],
                                          [1, 2]]))

    @pytest.mark.parametrize('create_index', [False, True])
    def test_single_reverse(self, table_types, create_index):
        t = table_types.Table()
        t.add_column(table_types.Column(name='a', data=[2, 1, 3]))
        t.add_column(table_types.Column(name='b', data=[6, 5, 4]))
        t.add_column(table_types.Column(name='c', data=[(1, 2), (3, 4), (4, 5)]))
        assert np.all(t['a'] == np.array([2, 1, 3]))
        assert np.all(t['b'] == np.array([6, 5, 4]))
        t.sort('a', reverse=True)
        assert np.all(t['a'] == np.array([3, 2, 1]))
        assert np.all(t['b'] == np.array([4, 6, 5]))
        assert np.all(t['c'] == np.array([[4, 5],
                                          [1, 2],
                                          [3, 4]]))
        t.sort('b', reverse=True)
        assert np.all(t['a'] == np.array([2, 1, 3]))
        assert np.all(t['b'] == np.array([6, 5, 4]))
        assert np.all(t['c'] == np.array([[1, 2],
                                          [3, 4],
                                          [4, 5]]))

    def test_single_big(self, table_types):
        """Sort a big-ish table with a non-trivial sort order"""
        x = np.arange(10000)
        y = np.sin(x)
        t = table_types.Table([x, y], names=('x', 'y'))
        t.sort('y')
        idx = np.argsort(y)
        assert np.all(t['x'] == x[idx])
        assert np.all(t['y'] == y[idx])

    @pytest.mark.parametrize('reverse', [True, False])
    def test_empty_reverse(self, table_types, reverse):
        t = table_types.Table([[], []], dtype=['f4', 'U1'])
        t.sort('col1', reverse=reverse)

    def test_multiple(self, table_types):
        t = table_types.Table()
        t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1]))
        t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4]))
        assert np.all(t['a'] == np.array([2, 1, 3, 2, 3, 1]))
        assert np.all(t['b'] == np.array([6, 5, 4, 3, 5, 4]))
        t.sort(['a', 'b'])
        assert np.all(t['a'] == np.array([1, 1, 2, 2, 3, 3]))
        assert np.all(t['b'] == np.array([4, 5, 3, 6, 4, 5]))
        t.sort(['b', 'a'])
        assert np.all(t['a'] == np.array([2, 1, 3, 1, 3, 2]))
        assert np.all(t['b'] == np.array([3, 4, 4, 5, 5, 6]))
        t.sort(('a', 'b'))
        assert np.all(t['a'] == np.array([1, 1, 2, 2, 3, 3]))
        assert np.all(t['b'] == np.array([4, 5, 3, 6, 4, 5]))

    def test_multiple_reverse(self, table_types):
        t = table_types.Table()
        t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1]))
        t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4]))
        assert np.all(t['a'] == np.array([2, 1, 3, 2, 3, 1]))
        assert np.all(t['b'] == np.array([6, 5, 4, 3, 5, 4]))
        t.sort(['a', 'b'], reverse=True)
        assert np.all(t['a'] == np.array([3, 3, 2, 2, 1, 1]))
        assert np.all(t['b'] == np.array([5, 4, 6, 3, 5, 4]))
        t.sort(['b', 'a'], reverse=True)
        assert np.all(t['a'] == np.array([2, 3, 1, 3, 1, 2]))
        assert np.all(t['b'] == np.array([6, 5, 5, 4, 4, 3]))
        t.sort(('a', 'b'), reverse=True)
        assert np.all(t['a'] == np.array([3, 3, 2, 2, 1, 1]))
        assert np.all(t['b'] == np.array([5, 4, 6, 3, 5, 4]))

    def test_multiple_with_bytes(self, table_types):
        t = table_types.Table()
        t.add_column(table_types.Column(name='firstname', data=[b"Max", b"Jo", b"John"]))
        t.add_column(table_types.Column(name='name', data=[b"Miller", b"Miller", b"Jackson"]))
        t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
        t.sort(['name', 'firstname'])
        assert np.all([t['firstname'] == np.array([b"John", b"Jo", b"Max"])])
        assert np.all([t['name'] == np.array([b"Jackson", b"Miller", b"Miller"])])
        assert np.all([t['tel'] == np.array([19, 15, 12])])

    def test_multiple_with_unicode(self, table_types):
        # Before Numpy 1.6.2, sorting with multiple column names
        # failed when a unicode column was present.
        t = table_types.Table()
        t.add_column(table_types.Column(
            name='firstname', data=[str(x) for x in ["Max", "Jo", "John"]]))
        t.add_column(table_types.Column(
            name='name', data=[str(x) for x in ["Miller", "Miller", "Jackson"]]))
        t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
        t.sort(['name', 'firstname'])
        assert np.all([t['firstname'] == np.array(
            [str(x) for x in ["John", "Jo", "Max"]])])
        assert np.all([t['name'] == np.array(
            [str(x) for x in ["Jackson", "Miller", "Miller"]])])
        assert np.all([t['tel'] == np.array([19, 15, 12])])

    def test_argsort(self, table_types):
        t = table_types.Table()
        t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1]))
        t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4]))
        assert np.all(t.argsort() == t.as_array().argsort())
        i0 = t.argsort('a')
        i1 = t.as_array().argsort(order=['a'])
        assert np.all(t['a'][i0] == t['a'][i1])
        i0 = t.argsort(['a', 'b'])
        i1 = t.as_array().argsort(order=['a', 'b'])
        assert np.all(t['a'][i0] == t['a'][i1])
        assert np.all(t['b'][i0] == t['b'][i1])

    @pytest.mark.parametrize('add_index', [False, True])
    def test_argsort_reverse(self, table_types, add_index):
        t = table_types.Table()
        t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1]))
        t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4]))
        if add_index:
            t.add_index('a')
        assert np.all(t.argsort(reverse=True) == np.array([4, 2, 0, 3, 1, 5]))
        i0 = t.argsort('a', reverse=True)
        i1 = np.array([4, 2, 3, 0, 5, 1])
        assert np.all(t['a'][i0] == t['a'][i1])
        i0 = t.argsort(['a', 'b'], reverse=True)
        i1 = np.array([4, 2, 0, 3, 1, 5])
        assert np.all(t['a'][i0] == t['a'][i1])
        assert np.all(t['b'][i0] == t['b'][i1])

    def test_argsort_bytes(self, table_types):
        t = table_types.Table()
        t.add_column(table_types.Column(name='firstname', data=[b"Max", b"Jo", b"John"]))
        t.add_column(table_types.Column(name='name', data=[b"Miller", b"Miller", b"Jackson"]))
        t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
        assert np.all(t.argsort(['name', 'firstname']) == np.array([2, 1, 0]))

    def test_argsort_unicode(self, table_types):
        # Before Numpy 1.6.2, sorting with multiple column names
        # failed when a unicode column was present.
        t = table_types.Table()
        t.add_column(table_types.Column(
            name='firstname', data=[str(x) for x in ["Max", "Jo", "John"]]))
        t.add_column(table_types.Column(
            name='name', data=[str(x) for x in ["Miller", "Miller", "Jackson"]]))
        t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
        assert np.all(t.argsort(['name', 'firstname']) == np.array([2, 1, 0]))

    def test_rebuild_column_view_then_rename(self, table_types):
        """
        Issue #2039 where renaming fails after any method that calls
        _rebuild_table_column_view (this includes sort and add_row).
""" t = table_types.Table([[1]], names=('a',)) assert t.colnames == ['a'] assert t.dtype.names == ('a',) t.add_row((2,)) assert t.colnames == ['a'] assert t.dtype.names == ('a',) t.rename_column('a', 'b') assert t.colnames == ['b'] assert t.dtype.names == ('b',) t.sort('b') assert t.colnames == ['b'] assert t.dtype.names == ('b',) t.rename_column('b', 'c') assert t.colnames == ['c'] assert t.dtype.names == ('c',) @pytest.mark.parametrize('kwargs', [{}, {'kind': 'stable'}, {'kind': 'quicksort'}]) def test_sort_kind(kwargs): t = Table() t['a'] = [2, 1, 3, 2, 3, 1] t['b'] = [6, 5, 4, 3, 5, 4] t_struct = t.as_array() # Since sort calls Table.argsort this covers `kind` for both methods t.sort(['a', 'b'], **kwargs) assert np.all(t.as_array() == np.sort(t_struct, **kwargs)) @pytest.mark.usefixtures('table_types') class TestIterator(): def test_iterator(self, table_types): d = np.array([(2, 1), (3, 6), (4, 5)], dtype=[('a', 'i4'), ('b', 'i4')]) t = table_types.Table(d) if t.masked: with pytest.raises(ValueError): t[0] == d[0] else: for row, np_row in zip(t, d): assert np.all(row == np_row) @pytest.mark.usefixtures('table_types') class TestSetMeta(): def test_set_meta(self, table_types): d = table_types.Table(names=('a', 'b')) d.meta['a'] = 1 d.meta['b'] = 1 d.meta['c'] = 1 d.meta['d'] = 1 assert list(d.meta.keys()) == ['a', 'b', 'c', 'd'] @pytest.mark.usefixtures('table_types') class TestConvertNumpyArray(): def test_convert_numpy_array(self, table_types): d = table_types.Table([[1, 2], [3, 4]], names=('a', 'b')) np_data = np.array(d) if table_types.Table is not MaskedTable: assert np.all(np_data == d.as_array()) assert np_data is not d.as_array() assert d.colnames == list(np_data.dtype.names) np_data = np.array(d, copy=False) if table_types.Table is not MaskedTable: assert np.all(np_data == d.as_array()) assert d.colnames == list(np_data.dtype.names) with pytest.raises(ValueError): np_data = np.array(d, dtype=[('c', 'i8'), ('d', 'i8')]) def test_as_array_byteswap(self, table_types): """Test for https://github.com/astropy/astropy/pull/4080""" byte_orders = ('>', '<') native_order = byte_orders[sys.byteorder == 'little'] for order in byte_orders: col = table_types.Column([1.0, 2.0], name='a', dtype=order + 'f8') t = table_types.Table([col]) arr = t.as_array() assert arr['a'].dtype.byteorder in (native_order, '=') arr = t.as_array(keep_byteorder=True) if order == native_order: assert arr['a'].dtype.byteorder in (order, '=') else: assert arr['a'].dtype.byteorder == order def test_byteswap_fits_array(self, table_types): """ Test for https://github.com/astropy/astropy/pull/4080, demonstrating that FITS tables are converted to native byte order. 
""" non_native_order = ('>', '<')[sys.byteorder != 'little'] filename = get_pkg_data_filename('data/tb.fits', 'astropy.io.fits.tests') t = table_types.Table.read(filename) arr = t.as_array() for idx in range(len(arr.dtype)): assert arr.dtype[idx].byteorder != non_native_order with fits.open(filename, character_as_bytes=True) as hdul: data = hdul[1].data for colname in data.columns.names: assert np.all(data[colname] == arr[colname]) arr2 = t.as_array(keep_byteorder=True) for colname in data.columns.names: assert (data[colname].dtype.byteorder == arr2[colname].dtype.byteorder) def _assert_copies(t, t2, deep=True): assert t.colnames == t2.colnames np.testing.assert_array_equal(t.as_array(), t2.as_array()) assert t.meta == t2.meta for col, col2 in zip(t.columns.values(), t2.columns.values()): if deep: assert not np.may_share_memory(col, col2) else: assert np.may_share_memory(col, col2) def test_copy(): t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y']) t2 = t.copy() _assert_copies(t, t2) def test_copy_masked(): t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y'], masked=True, meta={'name': 'test'}) t['x'].mask == [True, False, True] t2 = t.copy() _assert_copies(t, t2) def test_copy_protocol(): t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y']) t2 = copy.copy(t) t3 = copy.deepcopy(t) _assert_copies(t, t2, deep=False) _assert_copies(t, t3) def test_disallow_inequality_comparisons(): """ Regression test for #828 - disallow comparison operators on whole Table """ t = table.Table() with pytest.raises(TypeError): t > 2 with pytest.raises(TypeError): t < 1.1 with pytest.raises(TypeError): t >= 5.5 with pytest.raises(TypeError): t <= -1.1 def test_values_equal_part1(): col1 = [1, 2] col2 = [1.0, 2.0] col3 = ['a', 'b'] t1 = table.Table([col1, col2, col3], names=['a', 'b', 'c']) t2 = table.Table([col1, col2], names=['a', 'b']) t3 = table.table_helpers.simple_table() tm = t1.copy() tm['time'] = Time([1, 2], format='cxcsec') tm1 = tm.copy() tm1['time'][0] = np.ma.masked tq = table.table_helpers.simple_table() tq['quantity'] = [1., 2., 3.] 


def test_values_equal_part1():
    col1 = [1, 2]
    col2 = [1.0, 2.0]
    col3 = ['a', 'b']
    t1 = table.Table([col1, col2, col3], names=['a', 'b', 'c'])
    t2 = table.Table([col1, col2], names=['a', 'b'])
    t3 = table.table_helpers.simple_table()
    tm = t1.copy()
    tm['time'] = Time([1, 2], format='cxcsec')
    tm1 = tm.copy()
    tm1['time'][0] = np.ma.masked

    tq = table.table_helpers.simple_table()
    tq['quantity'] = [1., 2., 3.] * u.m

    tsk = table.table_helpers.simple_table()
    tsk['sk'] = SkyCoord(1, 2, unit='deg')
    eqsk = tsk.values_equal(tsk)
    for col in eqsk.itercols():
        assert np.all(col)

    with pytest.raises(ValueError,
                       match='cannot compare tables with different column names'):
        t2.values_equal(t1)

    with pytest.raises(ValueError, match='unable to compare column a'):
        # Shape mismatch
        t3.values_equal(t1)

    with pytest.raises(ValueError, match='unable to compare column c'):
        # Type mismatch in column c causes FutureWarning
        t1.values_equal(2)

    with pytest.raises(ValueError, match='unable to compare column c'):
        t1.values_equal([1, 2])

    eq = t2.values_equal(t2)
    for col in eq.colnames:
        assert np.all(eq[col] == [True, True])

    eq1 = tm1.values_equal(tm)
    for col in eq1.colnames:
        assert np.all(eq1[col] == [True, True])

    eq2 = tq.values_equal(tq)
    for col in eq2.colnames:
        assert np.all(eq2[col] == [True, True, True])

    eq3 = t2.values_equal(2)
    for col in eq3.colnames:
        assert np.all(eq3[col] == [False, True])

    eq4 = t2.values_equal([1, 2])
    for col in eq4.colnames:
        assert np.all(eq4[col] == [True, True])

    # Compare table to its first row
    t = table.Table(rows=[(1, 'a'),
                          (1, 'b')])
    eq = t.values_equal(t[0])
    assert np.all(eq['col0'] == [True, True])
    assert np.all(eq['col1'] == [True, False])


def test_rows_equal():
    t = table.Table.read([' a b c d',
                          ' 2 c 7.0 0',
                          ' 2 b 5.0 1',
                          ' 2 b 6.0 2',
                          ' 2 a 4.0 3',
                          ' 0 a 0.0 4',
                          ' 1 b 3.0 5',
                          ' 1 a 2.0 6',
                          ' 1 a 1.0 7'], format='ascii')

    # All rows are equal
    assert np.all(t == t)

    # Assert no rows are different
    assert not np.any(t != t)

    # Check equality result for a given row
    assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool))

    # Check inequality result for a given row
    assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool))

    t2 = table.Table.read([' a b c d',
                           ' 2 c 7.0 0',
                           ' 2 b 5.0 1',
                           ' 3 b 6.0 2',
                           ' 2 a 4.0 3',
                           ' 0 a 1.0 4',
                           ' 1 b 3.0 5',
                           ' 1 c 2.0 6',
                           ' 1 a 1.0 7',
                           ], format='ascii')

    # In the above cases, Row.__eq__ gets called, but now need to make sure
    # Table.__eq__ also gets called.
    assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
    assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool))

    # Check that comparing to a structured array works
    assert np.all((t == t2.as_array()) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
    assert np.all((t.as_array() == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))


def test_equality_masked():
    t = table.Table.read([' a b c d',
                          ' 2 c 7.0 0',
                          ' 2 b 5.0 1',
                          ' 2 b 6.0 2',
                          ' 2 a 4.0 3',
                          ' 0 a 0.0 4',
                          ' 1 b 3.0 5',
                          ' 1 a 2.0 6',
                          ' 1 a 1.0 7',
                          ], format='ascii')

    # Make into masked table
    t = table.Table(t, masked=True)

    # All rows are equal
    assert np.all(t == t)

    # Assert no rows are different
    assert not np.any(t != t)

    # Check equality result for a given row
    assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool))

    # Check inequality result for a given row
    assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool))

    t2 = table.Table.read([' a b c d',
                           ' 2 c 7.0 0',
                           ' 2 b 5.0 1',
                           ' 3 b 6.0 2',
                           ' 2 a 4.0 3',
                           ' 0 a 1.0 4',
                           ' 1 b 3.0 5',
                           ' 1 c 2.0 6',
                           ' 1 a 1.0 7',
                           ], format='ascii')

    # In the above cases, Row.__eq__ gets called, but now need to make sure
    # Table.__eq__ also gets called.
    assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
    assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool))

    # Check that masking a value causes the row to differ
    t.mask['a'][0] = True
    assert np.all((t == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
    assert np.all((t != t2) == np.array([1, 0, 1, 0, 1, 0, 1, 0], dtype=bool))

    # Check that comparing to a structured array works
    assert np.all((t == t2.as_array()) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool))


@pytest.mark.xfail
def test_equality_masked_bug():
    """
    This highlights a Numpy bug. Once it works, it can be moved into the
    test_equality_masked test.

    Related Numpy bug report:
    https://github.com/numpy/numpy/issues/3840
    """
    t = table.Table.read([' a b c d',
                          ' 2 c 7.0 0',
                          ' 2 b 5.0 1',
                          ' 2 b 6.0 2',
                          ' 2 a 4.0 3',
                          ' 0 a 0.0 4',
                          ' 1 b 3.0 5',
                          ' 1 a 2.0 6',
                          ' 1 a 1.0 7',
                          ], format='ascii')

    t = table.Table(t, masked=True)

    t2 = table.Table.read([' a b c d',
                           ' 2 c 7.0 0',
                           ' 2 b 5.0 1',
                           ' 3 b 6.0 2',
                           ' 2 a 4.0 3',
                           ' 0 a 1.0 4',
                           ' 1 b 3.0 5',
                           ' 1 c 2.0 6',
                           ' 1 a 1.0 7',
                           ], format='ascii')

    assert np.all((t.as_array() == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool))


# Check that the meta descriptor is working as expected. The MetaBaseTest class
# takes care of defining all the tests, and we simply have to define the class
# and any minimal set of args to pass.
class TestMetaTable(MetaBaseTest):
    test_class = table.Table
    args = ()


def test_unicode_content():
    # If we don't have unicode literals then return
    if isinstance('', bytes):
        return

    # Define unicode literals
    string_a = 'астрономическая питона'
    string_b = 'миллиарды световых лет'

    a = table.Table(
        [[string_a, 2],
         [string_b, 3]],
        names=('a', 'b'))

    assert string_a in str(a)
    # This only works because the coding of this file is utf-8, which
    # matches the default encoding of Table.__str__
    assert string_a.encode('utf-8') in bytes(a)


def test_unicode_policy():
    t = table.Table.read([' a b c d',
                          ' 2 c 7.0 0',
                          ' 2 b 5.0 1',
                          ' 2 b 6.0 2',
                          ' 2 a 4.0 3',
                          ' 0 a 0.0 4',
                          ' 1 b 3.0 5',
                          ' 1 a 2.0 6',
                          ' 1 a 1.0 7',
                          ], format='ascii')
    assert_follows_unicode_guidelines(t)
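

# A minimal sketch of the bytes-vs-str column distinction that the next test
# exercises in depth: dtype kind 'S' holds encoded bytestrings, kind 'U' holds
# unicode strings, and both can coexist in one table.
def _sketch_bytes_and_unicode_columns():
    from astropy.table import Table
    t = Table([[b'abc'], ['abc']], names=['bytes_col', 'str_col'])
    assert t['bytes_col'].dtype.kind == 'S'
    assert t['str_col'].dtype.kind == 'U'
    return t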
""" byt = uni.encode('utf-8') t = table_types.Table([[byt], [uni], [1]], dtype=('S', 'U', 'i')) assert t['col0'].dtype.kind == 'S' assert t['col1'].dtype.kind == 'U' assert t['col2'].dtype.kind == 'i' t['col0'].description = 'col0' t['col1'].description = 'col1' t['col0'].meta['val'] = 'val0' t['col1'].meta['val'] = 'val1' # Unicode to bytestring t1 = t.copy() t1.convert_unicode_to_bytestring() assert t1['col0'].dtype.kind == 'S' assert t1['col1'].dtype.kind == 'S' assert t1['col2'].dtype.kind == 'i' # Meta made it through assert t1['col0'].description == 'col0' assert t1['col1'].description == 'col1' assert t1['col0'].meta['val'] == 'val0' assert t1['col1'].meta['val'] == 'val1' # Need to de-fang the automatic unicode sandwiching of Table assert np.array(t1['col0'])[0] == byt assert np.array(t1['col1'])[0] == byt assert np.array(t1['col2'])[0] == 1 # Bytestring to unicode t1 = t.copy() t1.convert_bytestring_to_unicode() assert t1['col0'].dtype.kind == 'U' assert t1['col1'].dtype.kind == 'U' assert t1['col2'].dtype.kind == 'i' # Meta made it through assert t1['col0'].description == 'col0' assert t1['col1'].description == 'col1' assert t1['col0'].meta['val'] == 'val0' assert t1['col1'].meta['val'] == 'val1' # No need to de-fang the automatic unicode sandwiching of Table here, but # do just for consistency to prove things are working. assert np.array(t1['col0'])[0] == uni assert np.array(t1['col1'])[0] == uni assert np.array(t1['col2'])[0] == 1 def test_table_deletion(): """ Regression test for the reference cycle discussed in https://github.com/astropy/astropy/issues/2877 """ deleted = set() # A special table subclass which leaves a record when it is finalized class TestTable(table.Table): def __del__(self): deleted.add(id(self)) t = TestTable({'a': [1, 2, 3]}) the_id = id(t) assert t['a'].parent_table is t del t # Cleanup gc.collect() assert the_id in deleted def test_nested_iteration(): """ Regression test for issue 3358 where nested iteration over a single table fails. """ t = table.Table([[0, 1]], names=['a']) out = [] for r1 in t: for r2 in t: out.append((r1['a'], r2['a'])) assert out == [(0, 0), (0, 1), (1, 0), (1, 1)] def test_table_init_from_degenerate_arrays(table_types): t = table_types.Table(np.array([])) assert len(t.columns) == 0 with pytest.raises(ValueError): t = table_types.Table(np.array(0)) t = table_types.Table(np.array([1, 2, 3])) assert len(t.columns) == 3 @pytest.mark.skipif('not HAS_PANDAS') class TestPandas: def test_simple(self): t = table.Table() for endian in ['<', '>', '=']: for kind in ['f', 'i']: for byte in ['2', '4', '8']: dtype = np.dtype(endian + kind + byte) x = np.array([1, 2, 3], dtype=dtype) t[endian + kind + byte] = x.newbyteorder(endian) t['u'] = ['a', 'b', 'c'] t['s'] = ['a', 'b', 'c'] d = t.to_pandas() for column in t.columns: if column == 'u': assert np.all(t['u'] == np.array(['a', 'b', 'c'])) assert d[column].dtype == np.dtype("O") # upstream feature of pandas elif column == 's': assert np.all(t['s'] == np.array(['a', 'b', 'c'])) assert d[column].dtype == np.dtype("O") # upstream feature of pandas else: # We should be able to compare exact values here assert np.all(t[column] == d[column]) if t[column].dtype.isnative: assert d[column].dtype == t[column].dtype else: assert d[column].dtype == t[column].byteswap().newbyteorder().dtype # Regression test for astropy/astropy#1156 - the following code gave a # ValueError: Big-endian buffer not supported on little-endian # compiler. 
        # We now automatically swap the endian-ness to native order
        # upon adding the arrays to the data frame.
        # Explicitly testing little/big/native endian separately -
        # regression for a case in astropy/astropy#11286 not caught by #3729.
        d[['<i4', '>i4']]
        d[['<f4', '>f4']]

        t2 = table.Table.from_pandas(d)

        for column in t.columns:
            if column in ('u', 's'):
                assert np.all(t[column] == t2[column])
            else:
                assert_allclose(t[column], t2[column])
            if t[column].dtype.isnative:
                assert t[column].dtype == t2[column].dtype
            else:
                assert t[column].byteswap().newbyteorder().dtype == t2[column].dtype

    @pytest.mark.parametrize('unsigned', ['u', ''])
    @pytest.mark.parametrize('bits', [8, 16, 32, 64])
    def test_nullable_int(self, unsigned, bits):
        np_dtype = f'{unsigned}int{bits}'
        c = MaskedColumn([1, 2], mask=[False, True], dtype=np_dtype)
        t = Table([c])
        df = t.to_pandas()
        pd_dtype = np_dtype.replace('i', 'I').replace('u', 'U')
        assert str(df['col0'].dtype) == pd_dtype
        t2 = Table.from_pandas(df)
        assert str(t2['col0'].dtype) == np_dtype
        assert np.all(t2['col0'].mask == [False, True])
        assert np.all(t2['col0'] == c)

    def test_2d(self):
        t = table.Table()
        t['a'] = [1, 2, 3]
        t['b'] = np.ones((3, 2))

        with pytest.raises(ValueError,
                           match='Cannot convert a table with multidimensional columns'):
            t.to_pandas()

    def test_mixin_pandas(self):
        t = table.QTable()
        for name in sorted(MIXIN_COLS):
            if not name.startswith('ndarray'):
                t[name] = MIXIN_COLS[name]

        t['dt'] = TimeDelta([0, 2, 4, 6], format='sec')

        tp = t.to_pandas()
        t2 = table.Table.from_pandas(tp)

        assert np.allclose(t2['quantity'], [0, 1, 2, 3])
        assert np.allclose(t2['longitude'], [0., 1., 5., 6.])
        assert np.allclose(t2['latitude'], [5., 6., 10., 11.])
        assert np.allclose(t2['skycoord.ra'], [0, 1, 2, 3])
        assert np.allclose(t2['skycoord.dec'], [0, 1, 2, 3])
        assert np.allclose(t2['arraywrap'], [0, 1, 2, 3])
        assert np.allclose(t2['arrayswap'], [0, 1, 2, 3])
        assert np.allclose(t2['earthlocation.y'], [0, 110708, 547501, 654527],
                           rtol=0, atol=1)

        # For pandas, Time, TimeDelta are the mixins that round-trip the class
        assert isinstance(t2['time'], Time)
        assert np.allclose(t2['time'].jyear, [2000, 2001, 2002, 2003])
        assert np.all(t2['time'].isot == ['2000-01-01T12:00:00.000',
                                          '2000-12-31T18:00:00.000',
                                          '2002-01-01T00:00:00.000',
                                          '2003-01-01T06:00:00.000'])
        assert t2['time'].format == 'isot'

        # TimeDelta
        assert isinstance(t2['dt'], TimeDelta)
        assert np.allclose(t2['dt'].value, [0, 2, 4, 6])
        assert t2['dt'].format == 'sec'

    @pytest.mark.parametrize('use_IndexedTable', [False, True])
    def test_to_pandas_index(self, use_IndexedTable):
        """Test to_pandas() with different indexing options.

        This also tests the fix for #12014. The exception seen there is
        reproduced here without the fix.
""" import pandas as pd class IndexedTable(table.QTable): """Always index the first column""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.add_index(self.colnames[0]) row_index = pd.RangeIndex(0, 2, 1) tm_index = pd.DatetimeIndex(['1998-01-01', '2002-01-01'], dtype='datetime64[ns]', name='tm', freq=None) tm = Time([1998, 2002], format='jyear') x = [1, 2] table_cls = IndexedTable if use_IndexedTable else table.QTable t = table_cls([tm, x], names=['tm', 'x']) tp = t.to_pandas() if not use_IndexedTable: assert np.all(tp.index == row_index) tp = t.to_pandas(index='tm') assert np.all(tp.index == tm_index) t.add_index('tm') tp = t.to_pandas() assert np.all(tp.index == tm_index) # Make sure writing to pandas didn't hack the original table assert t['tm'].info.indices tp = t.to_pandas(index=True) assert np.all(tp.index == tm_index) tp = t.to_pandas(index=False) assert np.all(tp.index == row_index) with pytest.raises(ValueError) as err: t.to_pandas(index='not a column') assert 'index must be None, False' in str(err.value) def test_mixin_pandas_masked(self): tm = Time([1, 2, 3], format='cxcsec') dt = TimeDelta([1, 2, 3], format='sec') tm[1] = np.ma.masked dt[1] = np.ma.masked t = table.QTable([tm, dt], names=['tm', 'dt']) tp = t.to_pandas() assert np.all(tp['tm'].isnull() == [False, True, False]) assert np.all(tp['dt'].isnull() == [False, True, False]) t2 = table.Table.from_pandas(tp) assert np.all(t2['tm'].mask == tm.mask) assert np.ma.allclose(t2['tm'].jd, tm.jd, rtol=1e-14, atol=1e-14) assert np.all(t2['dt'].mask == dt.mask) assert np.ma.allclose(t2['dt'].jd, dt.jd, rtol=1e-14, atol=1e-14) def test_from_pandas_index(self): tm = Time([1998, 2002], format='jyear') x = [1, 2] t = table.Table([tm, x], names=['tm', 'x']) tp = t.to_pandas(index='tm') t2 = table.Table.from_pandas(tp) assert t2.colnames == ['x'] t2 = table.Table.from_pandas(tp, index=True) assert t2.colnames == ['tm', 'x'] assert np.allclose(t2['tm'].jyear, tm.jyear) @pytest.mark.parametrize('use_nullable_int', [True, False]) def test_masking(self, use_nullable_int): t = table.Table(masked=True) t['a'] = [1, 2, 3] t['a'].mask = [True, False, True] t['b'] = [1., 2., 3.] 
        t['b'].mask = [False, False, True]

        t['u'] = ['a', 'b', 'c']
        t['u'].mask = [False, True, False]

        t['s'] = ['a', 'b', 'c']
        t['s'].mask = [False, True, False]

        # https://github.com/astropy/astropy/issues/7741
        t['Source'] = [2584290278794471936, 2584290038276303744,
                       2584288728310999296]
        t['Source'].mask = [False, False, False]

        if use_nullable_int:  # Default
            # No warning with the default use_nullable_int=True
            d = t.to_pandas(use_nullable_int=use_nullable_int)
        else:
            with pytest.warns(TableReplaceWarning,
                              match=r"converted column 'a' from int(32|64) to float64"):
                d = t.to_pandas(use_nullable_int=use_nullable_int)

        t2 = table.Table.from_pandas(d)

        for name, column in t.columns.items():
            assert np.all(column.data == t2[name].data)
            if hasattr(t2[name], 'mask'):
                assert np.all(column.mask == t2[name].mask)

            if column.dtype.kind == 'i':
                if np.any(column.mask) and not use_nullable_int:
                    assert t2[name].dtype.kind == 'f'
                else:
                    assert t2[name].dtype.kind == 'i'

                assert_array_equal(column.data,
                                   t2[name].data.astype(column.dtype))
            else:
                if column.dtype.byteorder in ('=', '|'):
                    assert column.dtype == t2[name].dtype
                else:
                    assert column.byteswap().newbyteorder().dtype == t2[name].dtype

    def test_units(self):
        import pandas as pd
        import astropy.units as u

        df = pd.DataFrame({'x': [1, 2, 3], 't': [1.3, 1.2, 1.8]})
        t = table.Table.from_pandas(df, units={'x': u.m, 't': u.s})

        assert t['x'].unit == u.m
        assert t['t'].unit == u.s

        # test error if not a mapping
        with pytest.raises(TypeError):
            table.Table.from_pandas(df, units=[u.m, u.s])

        # test warning is raised if additional columns in units dict
        with pytest.warns(UserWarning) as record:
            table.Table.from_pandas(df, units={'x': u.m, 't': u.s, 'y': u.m})
        assert len(record) == 1
        assert "{'y'}" in record[0].message.args[0]

    def test_to_pandas_masked_int_data_with__index(self):
        data = {"data": [0, 1, 2], "index": [10, 11, 12]}
        t = table.Table(data=data, masked=True)

        t.add_index("index")
        t["data"].mask = [1, 1, 0]

        df = t.to_pandas()

        assert df["data"].iloc[-1] == 2
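

# A minimal sketch of the plain pandas round trip tested above (requires the
# optional pandas dependency; column names are illustrative only):
# ``to_pandas`` produces a DataFrame and ``from_pandas`` rebuilds an
# equivalent Table.
def _sketch_pandas_round_trip():
    import numpy as np
    from astropy import table
    t = table.Table({'x': [1, 2, 3], 'y': [1.5, 2.5, 3.5]})
    df = t.to_pandas()
    t2 = table.Table.from_pandas(df)
    assert t2.colnames == ['x', 'y']
    assert np.allclose(t2['y'], t['y'])
    return t2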
instead."): t.columns['a'] = [1, 2, 3] with pytest.raises(ValueError, match=r"column name not there is not in the table"): t.replace_column('not there', [1, 2, 3]) with pytest.raises(ValueError, match=r"length of new column must match table length"): t.replace_column('a', [1, 2]) def test_replace_column(self, table_types): """Replace existing column with a new column""" self._setup(table_types) t = table_types.Table([self.a, self.b]) ta = t['a'] tb = t['b'] vals = [1.2, 3.4, 5.6] for col in (vals, table_types.Column(vals), table_types.Column(vals, name='a'), table_types.Column(vals, name='b')): t.replace_column('a', col) assert np.all(t['a'] == vals) assert t['a'] is not ta # New a column assert t['b'] is tb # Original b column unchanged assert t.colnames == ['a', 'b'] assert t['a'].meta == {} assert t['a'].format is None # Special case: replacing the only column can resize table del t['b'] assert len(t) == 3 t['a'] = [1, 2] assert len(t) == 2 def test_replace_index_column(self, table_types): """Replace index column and generate expected exception""" self._setup(table_types) t = table_types.Table([self.a, self.b]) t.add_index('a') with pytest.raises(ValueError) as err: t.replace_column('a', [1, 2, 3]) assert err.value.args[0] == 'cannot replace a table index column' def test_replace_column_no_copy(self): t = Table([[1, 2], [3, 4]], names=['a', 'b']) a = np.array([1.5, 2.5]) t.replace_column('a', a, copy=False) assert t['a'][0] == a[0] t['a'][0] = 10 assert t['a'][0] == a[0] class TestQTableColumnConversionCornerCases: def test_replace_with_masked_col_with_units_in_qtable(self): """This is a small regression from #8902""" t = QTable([[1, 2], [3, 4]], names=['a', 'b']) t['a'] = MaskedColumn([5, 6], unit='m') assert isinstance(t['a'], u.Quantity) def test_do_not_replace_string_column_with_units_in_qtable(self): t = QTable([[1*u.m]]) with pytest.warns(AstropyUserWarning, match='convert it to Quantity failed'): t['a'] = Column(['a'], unit=u.m) assert isinstance(t['a'], Column) class Test__Astropy_Table__(): """ Test initializing a Table subclass from a table-like object that implements the __astropy_table__ interface method. """ class SimpleTable: def __init__(self): self.columns = [[1, 2, 3], [4, 5, 6], [7, 8, 9] * u.m] self.names = ['a', 'b', 'c'] self.meta = OrderedDict([('a', 1), ('b', 2)]) def __astropy_table__(self, cls, copy, **kwargs): a, b, c = self.columns c.info.name = 'c' cols = [table.Column(a, name='a'), table.MaskedColumn(b, name='b'), c] names = [col.info.name for col in cols] return cls(cols, names=names, copy=copy, meta=kwargs or self.meta) def test_simple_1(self): """Make a SimpleTable and convert to Table, QTable with copy=False, True""" for table_cls in (table.Table, table.QTable): col_c_class = u.Quantity if table_cls is table.QTable else table.Column for cpy in (False, True): st = self.SimpleTable() # Test putting in a non-native kwarg `extra_meta` to Table initializer t = table_cls(st, copy=cpy, extra_meta='extra!') assert t.colnames == ['a', 'b', 'c'] assert t.meta == {'extra_meta': 'extra!'} assert np.all(t['a'] == st.columns[0]) assert np.all(t['b'] == st.columns[1]) vals = t['c'].value if table_cls is table.QTable else t['c'] assert np.all(st.columns[2].value == vals) assert isinstance(t['a'], table.Column) assert isinstance(t['b'], table.MaskedColumn) assert isinstance(t['c'], col_c_class) assert t['c'].unit is u.m assert type(t) is table_cls # Copy being respected? 


class Test__Astropy_Table__():
    """
    Test initializing a Table subclass from a table-like object that
    implements the __astropy_table__ interface method.
    """

    class SimpleTable:
        def __init__(self):
            self.columns = [[1, 2, 3],
                            [4, 5, 6],
                            [7, 8, 9] * u.m]
            self.names = ['a', 'b', 'c']
            self.meta = OrderedDict([('a', 1), ('b', 2)])

        def __astropy_table__(self, cls, copy, **kwargs):
            a, b, c = self.columns
            c.info.name = 'c'
            cols = [table.Column(a, name='a'),
                    table.MaskedColumn(b, name='b'),
                    c]
            names = [col.info.name for col in cols]
            return cls(cols, names=names, copy=copy, meta=kwargs or self.meta)

    def test_simple_1(self):
        """Make a SimpleTable and convert to Table, QTable with copy=False, True"""
        for table_cls in (table.Table, table.QTable):
            col_c_class = u.Quantity if table_cls is table.QTable else table.Column
            for cpy in (False, True):
                st = self.SimpleTable()
                # Test putting in a non-native kwarg `extra_meta` to Table initializer
                t = table_cls(st, copy=cpy, extra_meta='extra!')
                assert t.colnames == ['a', 'b', 'c']
                assert t.meta == {'extra_meta': 'extra!'}
                assert np.all(t['a'] == st.columns[0])
                assert np.all(t['b'] == st.columns[1])
                vals = t['c'].value if table_cls is table.QTable else t['c']
                assert np.all(st.columns[2].value == vals)

                assert isinstance(t['a'], table.Column)
                assert isinstance(t['b'], table.MaskedColumn)
                assert isinstance(t['c'], col_c_class)
                assert t['c'].unit is u.m
                assert type(t) is table_cls

                # Copy being respected?
                t['a'][0] = 10
                assert st.columns[0][0] == 1 if cpy else 10

    def test_simple_2(self):
        """Test converting a SimpleTable and changing column names and types"""
        st = self.SimpleTable()
        dtypes = [np.int32, np.float32, np.float16]
        names = ['a', 'b', 'c']
        meta = OrderedDict([('c', 3)])
        t = table.Table(st, dtype=dtypes, names=names, meta=meta)
        assert t.colnames == names
        assert all(col.dtype.type is dtype
                   for col, dtype in zip(t.columns.values(), dtypes))

        # The supplied meta overrides the existing meta.  Changed in astropy 3.2.
        assert t.meta != st.meta
        assert t.meta == meta

    def test_kwargs_exception(self):
        """If extra kwargs provided but without initializing with a table-like
        object, exception is raised"""
        with pytest.raises(TypeError) as err:
            table.Table([[1]], extra_meta='extra!')
        assert '__init__() got unexpected keyword argument' in str(err.value)
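

# A minimal sketch of the ``__astropy_table__`` protocol tested above: any
# object defining ``__astropy_table__(self, cls, copy, **kwargs)`` and
# returning a ``cls`` instance can be passed straight to the Table/QTable
# initializer.  The class below is a purely illustrative data holder.
def _sketch_astropy_table_protocol():
    from astropy import table

    class TwoColumns:
        def __astropy_table__(self, cls, copy, **kwargs):
            return cls({'a': [1, 2], 'b': [3, 4]}, copy=copy, meta=kwargs)

    t = table.Table(TwoColumns(), copy=True)
    assert t.colnames == ['a', 'b']
    return t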


class TestUpdate():

    def _setup(self):
        self.a = Column((1, 2, 3), name='a')
        self.b = Column((4, 5, 6), name='b')
        self.c = Column((7, 8, 9), name='c')
        self.d = Column((10, 11, 12), name='d')

    def test_different_lengths(self):
        self._setup()
        t1 = Table([self.a])
        t2 = Table([self.b[:-1]])
        msg = 'Inconsistent data column lengths'
        with pytest.raises(ValueError, match=msg):
            t1.update(t2)
        # If update didn't succeed then t1 and t2 should not have changed.
        assert t1.colnames == ['a']
        assert np.all(t1['a'] == self.a)
        assert t2.colnames == ['b']
        assert np.all(t2['b'] == self.b[:-1])

    def test_invalid_inputs(self):
        # If input is invalid then nothing should be modified.
        self._setup()
        t = Table([self.a])
        d = {'b': self.b, 'c': [0]}
        msg = 'Inconsistent data column lengths: {1, 3}'
        with pytest.raises(ValueError, match=msg):
            t.update(d)
        assert t.colnames == ['a']
        assert np.all(t['a'] == self.a)
        assert d == {'b': self.b, 'c': [0]}

    def test_metadata_conflict(self):
        self._setup()
        t1 = Table([self.a], meta={'a': 0, 'b': [0], 'c': True})
        t2 = Table([self.b], meta={'a': 1, 'b': [1]})
        t2meta = copy.deepcopy(t2.meta)
        t1.update(t2)
        assert t1.meta == {'a': 1, 'b': [0, 1], 'c': True}
        # t2 metadata should not have changed.
        assert t2.meta == t2meta

    def test_update(self):
        self._setup()
        t1 = Table([self.a, self.b])
        t2 = Table([self.b, self.c])
        t2['b'] += 1
        t1.update(t2)
        assert t1.colnames == ['a', 'b', 'c']
        assert np.all(t1['a'] == self.a)
        assert np.all(t1['b'] == self.b+1)
        assert np.all(t1['c'] == self.c)
        # t2 should not have changed.
        assert t2.colnames == ['b', 'c']
        assert np.all(t2['b'] == self.b+1)
        assert np.all(t2['c'] == self.c)

        d = {'b': list(self.b), 'd': list(self.d)}
        dc = copy.deepcopy(d)
        t2.update(d)
        assert t2.colnames == ['b', 'c', 'd']
        assert np.all(t2['b'] == self.b)
        assert np.all(t2['c'] == self.c)
        assert np.all(t2['d'] == self.d)
        # d should not have changed.
        assert d == dc

        # Columns were copied, so changing t2 shouldn't have affected t1.
        assert t1.colnames == ['a', 'b', 'c']
        assert np.all(t1['a'] == self.a)
        assert np.all(t1['b'] == self.b+1)
        assert np.all(t1['c'] == self.c)

    def test_update_without_copy(self):
        self._setup()
        t1 = Table([self.a, self.b])
        t2 = Table([self.b, self.c])
        t1.update(t2, copy=False)
        t2['b'] -= 1
        assert t1.colnames == ['a', 'b', 'c']
        assert np.all(t1['a'] == self.a)
        assert np.all(t1['b'] == self.b-1)
        assert np.all(t1['c'] == self.c)

        d = {'b': np.array(self.b), 'd': np.array(self.d)}
        t2.update(d, copy=False)
        d['b'] *= 2
        assert t2.colnames == ['b', 'c', 'd']
        assert np.all(t2['b'] == 2*self.b)
        assert np.all(t2['c'] == self.c)
        assert np.all(t2['d'] == self.d)


def test_table_meta_copy():
    """
    Test no copy vs light (key) copy vs deep copy of table meta for different
    situations.  #8404.
    """
    t = table.Table([[1]])
    meta = {1: [1, 2]}

    # Assigning meta directly implies using direct object reference
    t.meta = meta
    assert t.meta is meta

    # Table slice implies key copy, so values are unchanged
    t2 = t[:]
    assert t2.meta is not t.meta  # NOT the same OrderedDict object but equal
    assert t2.meta == t.meta
    assert t2.meta[1] is t.meta[1]  # Value IS the list same object

    # Table init with copy=False implies key copy
    t2 = table.Table(t, copy=False)
    assert t2.meta is not t.meta  # NOT the same OrderedDict object but equal
    assert t2.meta == t.meta
    assert t2.meta[1] is t.meta[1]  # Value IS the same list object

    # Table init with copy=True implies deep copy
    t2 = table.Table(t, copy=True)
    assert t2.meta is not t.meta  # NOT the same OrderedDict object but equal
    assert t2.meta == t.meta
    assert t2.meta[1] is not t.meta[1]  # Value is NOT the same list object


def test_table_meta_copy_with_meta_arg():
    """
    Test no copy vs light (key) copy vs deep copy of table meta when meta is
    supplied as a table init argument.  #8404.
    """
    meta = {1: [1, 2]}
    meta2 = {2: [3, 4]}
    t = table.Table([[1]], meta=meta, copy=False)
    assert t.meta is meta

    t = table.Table([[1]], meta=meta)  # default copy=True
    assert t.meta is not meta
    assert t.meta == meta

    # Test initializing from existing table with meta with copy=False
    t2 = table.Table(t, meta=meta2, copy=False)
    assert t2.meta is meta2
    assert t2.meta != t.meta  # Change behavior in #8404

    # Test initializing from existing table with meta with default copy=True
    t2 = table.Table(t, meta=meta2)
    assert t2.meta is not meta2
    assert t2.meta != t.meta  # Change behavior in #8404

    # Table init with copy=True and empty dict meta gets that empty dict
    t2 = table.Table(t, copy=True, meta={})
    assert t2.meta == {}

    # Table init with copy=True and kwarg meta=None gets the original table dict.
    # This is a somewhat ambiguous case because it could be interpreted as the
    # user wanting NO meta set on the output.  This could be implemented by
    # inspecting call args.
    t2 = table.Table(t, copy=True, meta=None)
    assert t2.meta == t.meta

    # Test initializing empty table with meta with copy=False
    t = table.Table(meta=meta, copy=False)
    assert t.meta is meta
    assert t.meta[1] is meta[1]

    # Test initializing empty table with meta with default copy=True (deepcopy meta)
    t = table.Table(meta=meta)
    assert t.meta is not meta
    assert t.meta == meta
    assert t.meta[1] is not meta[1]


def test_replace_column_qtable():
    """Replace existing Quantity column with a new column in a QTable"""
    a = [1, 2, 3] * u.m
    b = [4, 5, 6]
    t = table.QTable([a, b], names=['a', 'b'])

    ta = t['a']
    tb = t['b']
    ta.info.meta = {'aa': [0, 1, 2, 3, 4]}
    ta.info.format = '%f'

    t.replace_column('a', a.to('cm'))
    assert np.all(t['a'] == ta)
    assert t['a'] is not ta  # New a column
    assert t['b'] is tb  # Original b column unchanged
    assert t.colnames == ['a', 'b']
    assert t['a'].info.meta is None
    assert t['a'].info.format is None


def test_replace_update_column_via_setitem():
    """
    Test table update like ``t['a'] = value``.  This leverages off the
    already well-tested ``replace_column`` and in-place update
    ``t['a'][:] = value``, so this testing is fairly light.
    """
    a = [1, 2] * u.m
    b = [3, 4]
    t = table.QTable([a, b], names=['a', 'b'])
    assert isinstance(t['a'], u.Quantity)

    # Inplace update
    ta = t['a']
    t['a'] = 5 * u.m
    assert np.all(t['a'] == [5, 5] * u.m)
    assert t['a'] is ta

    # Replace
    t['a'] = [5, 6]
    assert np.all(t['a'] == [5, 6])
    assert isinstance(t['a'], table.Column)
    assert t['a'] is not ta


def test_replace_update_column_via_setitem_warnings_normal():
    """
    Test warnings related to table replace change in #5556:
    Normal warning-free replace
    """
    t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
    with table.conf.set_temp('replace_warnings',
                             ['refcount', 'attributes', 'slice']):
        t['a'] = 0  # in-place update
        t['a'] = [10, 20, 30]  # replace column


def test_replace_update_column_via_setitem_warnings_slice():
    """
    Test warnings related to table replace change in #5556:
    Replace a slice, one warning.
    """
    t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
    with table.conf.set_temp('replace_warnings',
                             ['refcount', 'attributes', 'slice']):
        t2 = t[:2]
        t2['a'] = 0  # in-place slice update
        assert np.all(t['a'] == [0, 0, 3])

        with pytest.warns(TableReplaceWarning, match="replaced column 'a' "
                          "which looks like an array slice") as w:
            t2['a'] = [10, 20]  # replace slice
        assert len(w) == 1


def test_replace_update_column_via_setitem_warnings_attributes():
    """
    Test warnings related to table replace change in #5556:
    Lost attributes.
    """
    t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
    t['a'].unit = 'm'

    with pytest.warns(TableReplaceWarning, match=r"replaced column 'a' "
                      r"and column attributes \['unit'\]") as w:
        with table.conf.set_temp('replace_warnings',
                                 ['refcount', 'attributes', 'slice']):
            t['a'] = [10, 20, 30]
    assert len(w) == 1


def test_replace_update_column_via_setitem_warnings_refcount():
    """
    Test warnings related to table replace change in #5556:
    Reference count changes.
    """
    t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
    ta = t['a']  # noqa : Generate an extra reference to original column

    with pytest.warns(TableReplaceWarning, match="replaced column 'a' and the "
                      "number of references") as w:
        with table.conf.set_temp('replace_warnings',
                                 ['refcount', 'attributes', 'slice']):
            t['a'] = [10, 20, 30]
    assert len(w) == 1
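

# A minimal sketch of the configuration knob used throughout these tests:
# ``table.conf.set_temp('replace_warnings', [...])`` temporarily selects which
# replace checks ('refcount', 'attributes', 'slice', 'always') are active
# inside the ``with`` block, as the 'always' test below also shows.
def _sketch_replace_warnings_config():
    from astropy import table
    t = table.Table([[1, 2, 3]], names=['a'])
    with table.conf.set_temp('replace_warnings', []):
        t['a'] = [10, 20, 30]  # no replace checks are active here
    return t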
""" from inspect import currentframe, getframeinfo t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b']) with table.conf.set_temp('replace_warnings', ['always']): t['a'] = 0 # in-place slice update with pytest.warns(TableReplaceWarning, match="replaced column 'a'") as w: frameinfo = getframeinfo(currentframe()) t['a'] = [10, 20, 30] # replace column assert len(w) == 1 # Make sure the warning points back to the user code line assert w[0].lineno == frameinfo.lineno + 1 assert 'test_table' in w[0].filename def test_replace_update_column_via_setitem_replace_inplace(): """ Test the replace_inplace config option related to #5556. In this case no replace is done. """ t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b']) ta = t['a'] t['a'].unit = 'm' with table.conf.set_temp('replace_inplace', True): with table.conf.set_temp('replace_warnings', ['always', 'refcount', 'attributes', 'slice']): t['a'] = 0 # in-place update assert ta is t['a'] t['a'] = [10, 20, 30] # normally replaces column, but not now assert ta is t['a'] assert np.all(t['a'] == [10, 20, 30]) def test_primary_key_is_inherited(): """Test whether a new Table inherits the primary_key attribute from its parent Table. Issue #4672""" t = table.Table([(2, 3, 2, 1), (8, 7, 6, 5)], names=('a', 'b')) t.add_index('a') original_key = t.primary_key # can't test if tuples are equal, so just check content assert original_key[0] == 'a' t2 = t[:] t3 = t.copy() t4 = table.Table(t) # test whether the reference is the same in the following assert original_key == t2.primary_key assert original_key == t3.primary_key assert original_key == t4.primary_key # just test one element, assume rest are equal if assert passes assert t.loc[1] == t2.loc[1] assert t.loc[1] == t3.loc[1] assert t.loc[1] == t4.loc[1] def test_qtable_read_for_ipac_table_with_char_columns(): '''Test that a char column of a QTable is assigned no unit and not a dimensionless unit, otherwise conversion of reader output to QTable fails.''' t1 = table.QTable([["A"]], names="B") out = StringIO() t1.write(out, format="ascii.ipac") t2 = table.QTable.read(out.getvalue(), format="ascii.ipac", guess=False) assert t2["B"].unit is None def test_create_table_from_final_row(): """Regression test for issue #8422: passing the last row of a table into Table should return a new table containing that row.""" t1 = table.Table([(1, 2)], names=['col']) row = t1[-1] t2 = table.Table(row)['col'] assert t2[0] == 2 def test_key_values_in_as_array(): # Test for checking column slicing using key_values in Table.as_array() data_rows = [(1, 2.0, 'x'), (4, 5.0, 'y'), (5, 8.2, 'z')] # Creating a table with three columns t1 = table.Table(rows=data_rows, names=('a', 'b', 'c'), meta={'name': 'first table'}, dtype=('i4', 'f8', 'S1')) # Values of sliced column a,b is stored in a numpy array a = np.array([(1, 2.), (4, 5.), (5, 8.2)], dtype=[('a', '<i4'), ('b', '<f8')]) # Values for sliced column c is stored in a numpy array b = np.array([(b'x',), (b'y',), (b'z',)], dtype=[('c', 'S1')]) # Comparing initialised array with sliced array using Table.as_array() assert np.array_equal(a, t1.as_array(names=['a', 'b'])) assert np.array_equal(b, t1.as_array(names=['c'])) def test_tolist(): t = table.Table([[1, 2, 3], [1.1, 2.2, 3.3], [b'foo', b'bar', b'hello']], names=('a', 'b', 'c')) assert t['a'].tolist() == [1, 2, 3] assert_array_equal(t['b'].tolist(), [1.1, 2.2, 3.3]) assert t['c'].tolist() == ['foo', 'bar', 'hello'] assert isinstance(t['a'].tolist()[0], int) assert isinstance(t['b'].tolist()[0], float) assert 
isinstance(t['c'].tolist()[0], str) t = table.Table([[[1, 2], [3, 4]], [[b'foo', b'bar'], [b'hello', b'world']]], names=('a', 'c')) assert t['a'].tolist() == [[1, 2], [3, 4]] assert t['c'].tolist() == [['foo', 'bar'], ['hello', 'world']] assert isinstance(t['a'].tolist()[0][0], int) assert isinstance(t['c'].tolist()[0][0], str) class MyTable(Table): foo = TableAttribute() bar = TableAttribute(default=[]) baz = TableAttribute(default=1) def test_table_attribute(): assert repr(MyTable.baz) == '<TableAttribute name=baz default=1>' t = MyTable([[1, 2]]) # __attributes__ created on the fly on the first access of an attribute # that has a non-None default. assert '__attributes__' not in t.meta assert t.foo is None assert '__attributes__' not in t.meta assert t.baz == 1 assert '__attributes__' in t.meta t.bar.append(2.0) assert t.bar == [2.0] assert t.baz == 1 t.baz = 'baz' assert t.baz == 'baz' # Table attributes round-trip through pickle tp = pickle.loads(pickle.dumps(t)) assert tp.foo is None assert tp.baz == 'baz' assert tp.bar == [2.0] # Allow initialization of attributes in table creation, with / without data for data in None, [[1, 2]]: t2 = MyTable(data, foo=3, bar='bar', baz='baz') assert t2.foo == 3 assert t2.bar == 'bar' assert t2.baz == 'baz' # Initializing from an existing MyTable works, with and without kwarg attrs t3 = MyTable(t2) assert t3.foo == 3 assert t3.bar == 'bar' assert t3.baz == 'baz' t3 = MyTable(t2, foo=5, bar='fubar') assert t3.foo == 5 assert t3.bar == 'fubar' assert t3.baz == 'baz' # Deleting attributes removes it from attributes del t.baz assert 'baz' not in t.meta['__attributes__'] del t.bar assert '__attributes__' not in t.meta def test_table_attribute_ecsv(): # Table attribute round-trip through ECSV t = MyTable([[1, 2]], bar=[2.0], baz='baz') out = StringIO() t.write(out, format='ascii.ecsv') t2 = MyTable.read(out.getvalue(), format='ascii.ecsv') assert t2.foo is None assert t2.bar == [2.0] assert t2.baz == 'baz' def test_table_attribute_fail(): # Code raises ValueError(f'{attr} not allowed as TableAttribute') but in this # context it gets re-raised as a RuntimeError during class definition. 
with pytest.raises(RuntimeError, match='Error calling __set_name__'): class MyTable2(Table): descriptions = TableAttribute() # Conflicts with init arg with pytest.raises(RuntimeError, match='Error calling __set_name__'): class MyTable3(Table): colnames = TableAttribute() # Conflicts with built-in property def test_set_units_fail(): dat = [[1.0, 2.0], ['aa', 'bb']] with pytest.raises(ValueError, match='sequence of unit values must match number of columns'): Table(dat, units=[u.m]) with pytest.raises(ValueError, match='invalid column name c for setting unit attribute'): Table(dat, units={'c': u.m}) def test_set_units(): dat = [[1.0, 2.0], ['aa', 'bb'], [3, 4]] exp_units = (u.m, None, None) for cls in Table, QTable: for units in ({'a': u.m, 'c': ''}, exp_units): qt = cls(dat, units=units, names=['a', 'b', 'c']) if cls is QTable: assert isinstance(qt['a'], u.Quantity) assert isinstance(qt['b'], table.Column) assert isinstance(qt['c'], table.Column) for col, unit in zip(qt.itercols(), exp_units): assert col.info.unit is unit def test_set_descriptions(): dat = [[1.0, 2.0], ['aa', 'bb']] exp_descriptions = ('my description', None) for cls in Table, QTable: for descriptions in ({'a': 'my description'}, exp_descriptions): qt = cls(dat, descriptions=descriptions, names=['a', 'b']) for col, description in zip(qt.itercols(), exp_descriptions): assert col.info.description == description def test_set_units_from_row(): text = ['a,b', ',s', '1,2', '3,4'] units = Table.read(text, format='ascii', data_start=1, data_end=2)[0] t = Table.read(text, format='ascii', data_start=2, units=units) assert isinstance(units, table.Row) assert t['a'].info.unit is None assert t['b'].info.unit is u.s def test_set_units_descriptions_read(): """Test setting units and descriptions via Table.read. The test here is less comprehensive because the implementation is exactly the same as for Table.__init__ (calling Table._set_column_attribute) """ for cls in Table, QTable: t = cls.read(['a b', '1 2'], format='ascii', units=[u.m, u.s], descriptions=['hi', 'there']) assert t['a'].info.unit is u.m assert t['b'].info.unit is u.s assert t['a'].info.description == 'hi' assert t['b'].info.description == 'there' def test_broadcasting_8933(): """Explicitly check re-work of code related to broadcasting in #8933""" t = table.Table([[1, 2]]) # Length=2 table t['a'] = [[3, 4]] # Can broadcast if ndim > 1 and shape[0] == 1 t['b'] = 5 t['c'] = [1] # Treat as broadcastable scalar, not length=1 array (which would fail) assert np.all(t['a'] == [[3, 4], [3, 4]]) assert np.all(t['b'] == [5, 5]) assert np.all(t['c'] == [1, 1]) # Test that broadcasted column is writeable t['c'][1] = 10 assert np.all(t['c'] == [1, 10]) def test_custom_masked_column_in_nonmasked_table(): """Test the refactor and change in column upgrades introduced in 95902650f. 
This fixes a regression introduced by #8789 (Change behavior of Table regarding masked columns).""" class MyMaskedColumn(table.MaskedColumn): pass class MySubMaskedColumn(MyMaskedColumn): pass class MyColumn(table.Column): pass class MySubColumn(MyColumn): pass class MyTable(table.Table): Column = MyColumn MaskedColumn = MyMaskedColumn a = table.Column([1]) b = table.MaskedColumn([2], mask=[True]) c = MyMaskedColumn([3], mask=[True]) d = MySubColumn([4]) e = MySubMaskedColumn([5], mask=[True]) # Two different pathways for making table t1 = MyTable([a, b, c, d, e], names=['a', 'b', 'c', 'd', 'e']) t2 = MyTable() t2['a'] = a t2['b'] = b t2['c'] = c t2['d'] = d t2['e'] = e for t in (t1, t2): assert type(t['a']) is MyColumn assert type(t['b']) is MyMaskedColumn # upgrade assert type(t['c']) is MyMaskedColumn assert type(t['d']) is MySubColumn assert type(t['e']) is MySubMaskedColumn # sub-class not downgraded def test_sort_with_mutable_skycoord(): """Test sorting a table that has a mutable column such as SkyCoord. In this case the sort is done in-place """ t = Table([[2, 1], SkyCoord([4, 3], [6, 5], unit='deg,deg')], names=['a', 'sc']) meta = {'a': [1, 2]} ta = t['a'] tsc = t['sc'] t['sc'].info.meta = meta t.sort('a') assert np.all(t['a'] == [1, 2]) assert np.allclose(t['sc'].ra.to_value(u.deg), [3, 4]) assert np.allclose(t['sc'].dec.to_value(u.deg), [5, 6]) assert t['a'] is ta assert t['sc'] is tsc # Prior to astropy 4.1 this was a deep copy of SkyCoord column; after 4.1 # it is a reference. t['sc'].info.meta['a'][0] = 100 assert meta['a'][0] == 100 def test_sort_with_non_mutable(): """Test sorting a table that has a non-mutable column. """ t = Table([[2, 1], [3, 4]], names=['a', 'b']) ta = t['a'] tb = t['b'] t['b'].setflags(write=False) meta = {'a': [1, 2]} t['b'].info.meta = meta t.sort('a') assert np.all(t['a'] == [1, 2]) assert np.all(t['b'] == [4, 3]) assert ta is t['a'] assert tb is not t['b'] # Prior to astropy 4.1 this was a deep copy of SkyCoord column; after 4.1 # it is a reference. t['b'].info.meta['a'][0] = 100 assert meta['a'][0] == 1 def test_init_with_list_of_masked_arrays(): """Test the fix for #8977""" m0 = np.ma.array([0, 1, 2], mask=[True, False, True]) m1 = np.ma.array([3, 4, 5], mask=[False, True, False]) mc = [m0, m1] # Test _init_from_list t = table.Table([mc], names=['a']) # Test add_column t['b'] = [m1, m0] assert t['a'].shape == (2, 3) assert np.all(t['a'][0] == m0) assert np.all(t['a'][1] == m1) assert np.all(t['a'][0].mask == m0.mask) assert np.all(t['a'][1].mask == m1.mask) assert t['b'].shape == (2, 3) assert np.all(t['b'][0] == m1) assert np.all(t['b'][1] == m0) assert np.all(t['b'][0].mask == m1.mask) assert np.all(t['b'][1].mask == m0.mask) def test_data_to_col_convert_strategy(): """Test the update to how data_to_col works (#8972), using the regression example from #8971. """ t = table.Table([[0, 1]]) t['a'] = 1 t['b'] = np.int64(2) # Failed previously assert np.all(t['a'] == [1, 1]) assert np.all(t['b'] == [2, 2]) def test_rows_with_mixins(): """Test for #9165 to allow adding a list of mixin objects. Also test for fix to #9357 where group_by() failed due to mixin object not having info.indices set to []. 
""" tm = Time([1, 2], format='cxcsec') q = [1, 2] * u.m mixed1 = [1 * u.m, 2] # Mixed input, fails to convert to Quantity mixed2 = [2, 1 * u.m] # Mixed input, not detected as potential mixin rows = [(1, q[0], tm[0]), (2, q[1], tm[1])] t = table.QTable(rows=rows) t['a'] = [q[0], q[1]] t['b'] = [tm[0], tm[1]] t['m1'] = mixed1 t['m2'] = mixed2 assert np.all(t['col1'] == q) assert np.all(t['col2'] == tm) assert np.all(t['a'] == q) assert np.all(t['b'] == tm) assert np.all(t['m1'][ii] == mixed1[ii] for ii in range(2)) assert np.all(t['m2'][ii] == mixed2[ii] for ii in range(2)) assert type(t['m1']) is table.Column assert t['m1'].dtype is np.dtype(object) assert type(t['m2']) is table.Column assert t['m2'].dtype is np.dtype(object) # Ensure group_by() runs without failing for sortable columns. # The columns 'm1', and 'm2' are object dtype and not sortable. for name in ['col0', 'col1', 'col2', 'a', 'b']: t.group_by(name) # For good measure include exactly the failure in #9357 in which the # list of Time() objects is in the Table initializer. mjds = [Time(58000, format="mjd")] t = Table([mjds, ["gbt"]], names=("mjd", "obs")) t.group_by("obs") def test_iterrows(): dat = [(1, 2, 3), (4, 5, 6), (7, 8, 6)] t = table.Table(rows=dat, names=('a', 'b', 'c')) c_s = [] a_s = [] for c, a in t.iterrows('c', 'a'): a_s.append(a) c_s.append(c) assert np.all(t['a'] == a_s) assert np.all(t['c'] == c_s) rows = [row for row in t.iterrows()] assert rows == dat with pytest.raises(ValueError, match='d is not a valid column name'): t.iterrows('d') def test_values_and_types(): dat = [(1, 2, 3), (4, 5, 6), (7, 8, 6)] t = table.Table(rows=dat, names=('a', 'b', 'c')) assert isinstance(t.values(), type(OrderedDict().values())) assert isinstance(t.columns.values(), type(OrderedDict().values())) assert isinstance(t.columns.keys(), type(OrderedDict().keys())) for i in t.values(): assert isinstance(i, table.column.Column) def test_items(): dat = [(1, 2, 3), (4, 5, 6), (7, 8, 9)] t = table.Table(rows=dat, names=('a', 'b', 'c')) assert isinstance(t.items(), type(OrderedDict({}).items())) for i in list(t.items()): assert isinstance(i, tuple) def test_read_write_not_replaceable(): t = table.Table() with pytest.raises(AttributeError): t.read = 'fake_read' with pytest.raises(AttributeError): t.write = 'fake_write' def test_keep_columns_with_generator(): # Regression test for #12529 t = table.table_helpers.simple_table(1) t.keep_columns(col for col in t.colnames if col == 'a') assert t.colnames == ['a'] def test_remove_columns_with_generator(): # Regression test for #12529 t = table.table_helpers.simple_table(1) t.remove_columns(col for col in t.colnames if col == 'a') assert t.colnames == ['b', 'c'] def test_keep_columns_invalid_names_messages(): t = table.table_helpers.simple_table(1) with pytest.raises(KeyError, match='column "d" does not exist'): t.keep_columns(['c', 'd']) with pytest.raises(KeyError, match='columns {\'[de]\', \'[de]\'} do not exist'): t.keep_columns(['c', 'd', 'e']) def test_remove_columns_invalid_names_messages(): t = table.table_helpers.simple_table(1) with pytest.raises(KeyError, match='column "d" does not exist'): t.remove_columns(['c', 'd']) with pytest.raises(KeyError, match='columns {\'[de]\', \'[de]\'} do not exist'): t.remove_columns(['c', 'd', 'e']) @pytest.mark.parametrize("path_type", ['str', 'Path']) def test_read_write_tilde_path(path_type, home_is_tmpdir): if path_type == 'str': test_file = os.path.join('~', 'test.csv') else: test_file = pathlib.Path('~', 'test.csv') t1 = Table() t1['a'] = [1, 2, 3] 
t1.write(test_file) t2 = Table.read(test_file) assert np.all(t2['a'] == [1, 2, 3]) # Ensure the data wasn't written to the literal tilde-prefixed path assert not os.path.exists(test_file)
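

# Illustrative sketch (not part of the original suite): the TableAttribute
# tests above show that custom attributes are stored in meta['__attributes__']
# and survive serialization.  The names ``VersionedTable`` and ``version`` are
# hypothetical; the leading underscore keeps pytest from collecting this.
def _demo_table_attribute():
    class VersionedTable(Table):
        version = TableAttribute(default=0)

    vt = VersionedTable([[1, 2]], version=2)
    assert vt.version == 2
    # The attribute lives in the table meta under '__attributes__'.
    assert vt.meta['__attributes__']['version'] == 2
    # It round-trips through pickle like any other table metadata.
    vt2 = pickle.loads(pickle.dumps(vt))
    assert vt2.version == 2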
import os import re import pytest from astropy.table.scripts import showtable ROOT = os.path.abspath(os.path.dirname(__file__)) ASCII_ROOT = os.path.join(ROOT, '..', '..', 'io', 'ascii', 'tests') FITS_ROOT = os.path.join(ROOT, '..', '..', 'io', 'fits', 'tests') VOTABLE_ROOT = os.path.join(ROOT, '..', '..', 'io', 'votable', 'tests') def test_missing_file(capsys): showtable.main(['foobar.fits']) out, err = capsys.readouterr() assert err.startswith("ERROR: [Errno 2] No such file or directory: " "'foobar.fits'") def test_info(capsys): showtable.main([os.path.join(FITS_ROOT, 'data/table.fits'), '--info']) out, err = capsys.readouterr() assert out.splitlines() == ['<Table length=3>', ' name dtype ', '------ -------', 'target bytes20', ' V_mag float32'] def test_stats(capsys): showtable.main([os.path.join(FITS_ROOT, 'data/table.fits'), '--stats']) out, err = capsys.readouterr() expected = ['<Table length=3>', ' name mean std min max ', '------ ------- ------- ---- ----', 'target -- -- -- --', ' V_mag 12.866[0-9]? 1.72111 11.1 15.2'] out = out.splitlines() assert out[:4] == expected[:4] # Here we use re.match as in some cases one of the values above is # platform-dependent. assert re.match(expected[4], out[4]) is not None def test_fits(capsys): showtable.main([os.path.join(FITS_ROOT, 'data/table.fits')]) out, err = capsys.readouterr() assert out.splitlines() == [' target V_mag', '------- -----', 'NGC1001 11.1', 'NGC1002 12.3', 'NGC1003 15.2'] def test_fits_hdu(capsys): from astropy.units import UnitsWarning with pytest.warns(UnitsWarning): showtable.main([ os.path.join(FITS_ROOT, 'data/zerowidth.fits'), '--hdu', 'AIPS OF', ]) out, err = capsys.readouterr() assert out.startswith( ' TIME SOURCE ID ANTENNA NO. SUBARRAY FREQ ID ANT FLAG STATUS 1\n' ' DAYS \n' '---------- --------- ----------- -------- ------- -------- --------\n' '0.14438657 1 10 1 1 4 4\n') def test_csv(capsys): showtable.main([os.path.join(ASCII_ROOT, 'data/simple_csv.csv')]) out, err = capsys.readouterr() assert out.splitlines() == [' a b c ', '--- --- ---', ' 1 2 3', ' 4 5 6'] def test_ascii_format(capsys): showtable.main([os.path.join(ASCII_ROOT, 'data/commented_header.dat'), '--format', 'ascii.commented_header']) out, err = capsys.readouterr() assert out.splitlines() == [' a b c ', '--- --- ---', ' 1 2 3', ' 4 5 6'] def test_ascii_delimiter(capsys): showtable.main([os.path.join(ASCII_ROOT, 'data/simple2.txt'), '--format', 'ascii', '--delimiter', '|']) out, err = capsys.readouterr() assert out.splitlines() == [ "obsid redshift X Y object rad ", "----- -------- ---- ---- ----------- ----", " 3102 0.32 4167 4085 Q1250+568-A 9.0", " 3102 0.32 4706 3916 Q1250+568-B 14.0", " 877 0.22 4378 3892 'Source 82' 12.5", ] def test_votable(capsys): showtable.main([os.path.join(VOTABLE_ROOT, 'data/regression.xml'), '--table-id', 'main_table', '--max-width', '50']) out, err = capsys.readouterr() assert out.splitlines() == [ ' string_test string_test_2 ... bitarray2 ', '----------------- ------------- ... -------------', ' String & test Fixed stri ... True .. False', 'String &amp; test 0123456789 ... -- .. --', ' XXXX XXXX ... -- .. --', ' ... -- .. --', ' ... -- .. --'] def test_max_lines(capsys): showtable.main([os.path.join(ASCII_ROOT, 'data/cds2.dat'), '--format', 'ascii.cds', '--max-lines', '7', '--max-width', '30']) out, err = capsys.readouterr() assert out.splitlines() == [ ' SST ... Note', ' ... ', '--------------- ... ----', '041314.1+281910 ... --', ' ... ... ...', '044427.1+251216 ... --', '044642.6+245903 ... 
--', 'Length = 215 rows', ] def test_show_dtype(capsys): showtable.main([os.path.join(FITS_ROOT, 'data/table.fits'), '--show-dtype']) out, err = capsys.readouterr() assert out.splitlines() == [ ' target V_mag ', 'bytes20 float32', '------- -------', 'NGC1001 11.1', 'NGC1002 12.3', 'NGC1003 15.2', ] def test_hide_unit(capsys): showtable.main([os.path.join(ASCII_ROOT, 'data/cds.dat'), '--format', 'ascii.cds']) out, err = capsys.readouterr() assert out.splitlines() == [ 'Index RAh RAm RAs DE- DEd DEm DEs Match Class AK Fit ', ' h min s deg arcmin arcsec mag GMsun', '----- --- --- ----- --- --- ------ ------ ----- ----- --- -----', ' 1 3 28 39.09 + 31 6 1.9 -- I* -- 1.35', ] showtable.main([os.path.join(ASCII_ROOT, 'data/cds.dat'), '--format', 'ascii.cds', '--hide-unit']) out, err = capsys.readouterr() assert out.splitlines() == [ 'Index RAh RAm RAs DE- DEd DEm DEs Match Class AK Fit ', '----- --- --- ----- --- --- --- --- ----- ----- --- ----', ' 1 3 28 39.09 + 31 6 1.9 -- I* -- 1.35', ]
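

# Illustrative sketch (not part of the original suite): ``showtable.main`` is
# the same entry point used by the ``showtable`` command-line script that
# astropy installs, so the cases above correspond to invocations such as
# ``showtable table.fits --info``.
def _demo_showtable():
    # Prints the column summary for the FITS table used throughout this file.
    showtable.main([os.path.join(FITS_ROOT, 'data/table.fits'), '--info'])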
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest import numpy as np from astropy.table import Table, Column, QTable, table_helpers, NdarrayMixin, unique from astropy.utils.compat import NUMPY_LT_1_22, NUMPY_LT_1_22_1 from astropy.utils.exceptions import AstropyUserWarning from astropy import time from astropy import units as u from astropy import coordinates def sort_eq(list1, list2): return sorted(list1) == sorted(list2) def test_column_group_by(T1): for masked in (False, True): t1 = QTable(T1, masked=masked) t1a = t1['a'].copy() # Group by a Column (i.e. numpy array) t1ag = t1a.group_by(t1['a']) assert np.all(t1ag.groups.indices == np.array([0, 1, 4, 8])) # Group by a Table t1ag = t1a.group_by(t1['a', 'b']) assert np.all(t1ag.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8])) # Group by a numpy structured array t1ag = t1a.group_by(t1['a', 'b'].as_array()) assert np.all(t1ag.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8])) def test_table_group_by(T1): """ Test basic table group_by functionality for possible key types and for masked/unmasked tables. """ for masked in (False, True): t1 = QTable(T1, masked=masked) # Group by a single column key specified by name tg = t1.group_by('a') assert np.all(tg.groups.indices == np.array([0, 1, 4, 8])) assert str(tg.groups) == "<TableGroups indices=[0 1 4 8]>" assert str(tg['a'].groups) == "<ColumnGroups indices=[0 1 4 8]>" # Sorted by 'a' and in original order for rest assert tg.pformat() == [' a b c d q ', ' m ', '--- --- --- --- ---', ' 0 a 0.0 4 4.0', ' 1 b 3.0 5 5.0', ' 1 a 2.0 6 6.0', ' 1 a 1.0 7 7.0', ' 2 c 7.0 0 0.0', ' 2 b 5.0 1 1.0', ' 2 b 6.0 2 2.0', ' 2 a 4.0 3 3.0'] assert tg.meta['ta'] == 1 assert tg['c'].meta['a'] == 1 assert tg['c'].description == 'column c' # Group by a table column tg2 = t1.group_by(t1['a']) assert tg.pformat() == tg2.pformat() # Group by two columns spec'd by name for keys in (['a', 'b'], ('a', 'b')): tg = t1.group_by(keys) assert np.all(tg.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8])) # Sorted by 'a', 'b' and in original order for rest assert tg.pformat() == [' a b c d q ', ' m ', '--- --- --- --- ---', ' 0 a 0.0 4 4.0', ' 1 a 2.0 6 6.0', ' 1 a 1.0 7 7.0', ' 1 b 3.0 5 5.0', ' 2 a 4.0 3 3.0', ' 2 b 5.0 1 1.0', ' 2 b 6.0 2 2.0', ' 2 c 7.0 0 0.0'] # Group by a Table tg2 = t1.group_by(t1['a', 'b']) assert tg.pformat() == tg2.pformat() # Group by a structured array tg2 = t1.group_by(t1['a', 'b'].as_array()) assert tg.pformat() == tg2.pformat() # Group by a simple ndarray tg = t1.group_by(np.array([0, 1, 0, 1, 2, 1, 0, 0])) assert np.all(tg.groups.indices == np.array([0, 4, 7, 8])) assert tg.pformat() == [' a b c d q ', ' m ', '--- --- --- --- ---', ' 2 c 7.0 0 0.0', ' 2 b 6.0 2 2.0', ' 1 a 2.0 6 6.0', ' 1 a 1.0 7 7.0', ' 2 b 5.0 1 1.0', ' 2 a 4.0 3 3.0', ' 1 b 3.0 5 5.0', ' 0 a 0.0 4 4.0'] def test_groups_keys(T1): tg = T1.group_by('a') keys = tg.groups.keys assert keys.dtype.names == ('a',) assert np.all(keys['a'] == np.array([0, 1, 2])) tg = T1.group_by(['a', 'b']) keys = tg.groups.keys assert keys.dtype.names == ('a', 'b') assert np.all(keys['a'] == np.array([0, 1, 1, 2, 2, 2])) assert np.all(keys['b'] == np.array(['a', 'a', 'b', 'a', 'b', 'c'])) # Grouping by Column ignores column name tg = T1.group_by(T1['b']) keys = tg.groups.keys assert keys.dtype.names is None def test_groups_iterator(T1): tg = T1.group_by('a') for ii, group in enumerate(tg.groups): assert group.pformat() == tg.groups[ii].pformat() assert group['a'][0] == tg['a'][tg.groups.indices[ii]] def 
test_grouped_copy(T1): """ Test that copying a table or column copies the groups properly """ for masked in (False, True): t1 = QTable(T1, masked=masked) tg = t1.group_by('a') tgc = tg.copy() assert np.all(tgc.groups.indices == tg.groups.indices) assert np.all(tgc.groups.keys == tg.groups.keys) tac = tg['a'].copy() assert np.all(tac.groups.indices == tg['a'].groups.indices) c1 = t1['a'].copy() gc1 = c1.group_by(t1['a']) gc1c = gc1.copy() assert np.all(gc1c.groups.indices == np.array([0, 1, 4, 8])) def test_grouped_slicing(T1): """ Test that slicing a table removes previous grouping """ for masked in (False, True): t1 = QTable(T1, masked=masked) # Regular slice of a table tg = t1.group_by('a') tg2 = tg[3:5] assert np.all(tg2.groups.indices == np.array([0, len(tg2)])) assert tg2.groups.keys is None def test_group_column_from_table(T1): """ Group a column that is part of a table """ cg = T1['c'].group_by(np.array(T1['a'])) assert np.all(cg.groups.keys == np.array([0, 1, 2])) assert np.all(cg.groups.indices == np.array([0, 1, 4, 8])) def test_table_groups_mask_index(T1): """ Use boolean mask as item in __getitem__ for groups """ for masked in (False, True): t1 = Table(T1, masked=masked).group_by('a') t2 = t1.groups[np.array([True, False, True])] assert len(t2.groups) == 2 assert t2.groups[0].pformat() == t1.groups[0].pformat() assert t2.groups[1].pformat() == t1.groups[2].pformat() assert np.all(t2.groups.keys['a'] == np.array([0, 2])) def test_table_groups_array_index(T1): """ Use numpy array as item in __getitem__ for groups """ for masked in (False, True): t1 = Table(T1, masked=masked).group_by('a') t2 = t1.groups[np.array([0, 2])] assert len(t2.groups) == 2 assert t2.groups[0].pformat() == t1.groups[0].pformat() assert t2.groups[1].pformat() == t1.groups[2].pformat() assert np.all(t2.groups.keys['a'] == np.array([0, 2])) def test_table_groups_slicing(T1): """ Test that slicing table groups works """ for masked in (False, True): t1 = Table(T1, masked=masked).group_by('a') # slice(0, 2) t2 = t1.groups[0:2] assert len(t2.groups) == 2 assert t2.groups[0].pformat() == t1.groups[0].pformat() assert t2.groups[1].pformat() == t1.groups[1].pformat() assert np.all(t2.groups.keys['a'] == np.array([0, 1])) # slice(1, 2) t2 = t1.groups[1:2] assert len(t2.groups) == 1 assert t2.groups[0].pformat() == t1.groups[1].pformat() assert np.all(t2.groups.keys['a'] == np.array([1])) # slice(0, 3, 2) t2 = t1.groups[0:3:2] assert len(t2.groups) == 2 assert t2.groups[0].pformat() == t1.groups[0].pformat() assert t2.groups[1].pformat() == t1.groups[2].pformat() assert np.all(t2.groups.keys['a'] == np.array([0, 2])) def test_grouped_item_access(T1): """ Test that column slicing preserves grouping """ for masked in (False, True): t1 = Table(T1, masked=masked) # Regular slice of a table tg = t1.group_by('a') tgs = tg['a', 'c', 'd'] assert np.all(tgs.groups.keys == tg.groups.keys) assert np.all(tgs.groups.indices == tg.groups.indices) tgsa = tgs.groups.aggregate(np.sum) assert tgsa.pformat() == [' a c d ', '--- ---- ---', ' 0 0.0 4', ' 1 6.0 18', ' 2 22.0 6'] tgs = tg['c', 'd'] assert np.all(tgs.groups.keys == tg.groups.keys) assert np.all(tgs.groups.indices == tg.groups.indices) tgsa = tgs.groups.aggregate(np.sum) assert tgsa.pformat() == [' c d ', '---- ---', ' 0.0 4', ' 6.0 18', '22.0 6'] def test_mutable_operations(T1): """ Operations like adding or deleting a row should removing grouping, but adding or removing or renaming a column should retain grouping. 
""" for masked in (False, True): t1 = QTable(T1, masked=masked) # add row tg = t1.group_by('a') tg.add_row((0, 'a', 3.0, 4, 4 * u.m)) assert np.all(tg.groups.indices == np.array([0, len(tg)])) assert tg.groups.keys is None # remove row tg = t1.group_by('a') tg.remove_row(4) assert np.all(tg.groups.indices == np.array([0, len(tg)])) assert tg.groups.keys is None # add column tg = t1.group_by('a') indices = tg.groups.indices.copy() tg.add_column(Column(name='e', data=np.arange(len(tg)))) assert np.all(tg.groups.indices == indices) assert np.all(tg['e'].groups.indices == indices) assert np.all(tg['e'].groups.keys == tg.groups.keys) # remove column (not key column) tg = t1.group_by('a') tg.remove_column('b') assert np.all(tg.groups.indices == indices) # Still has original key col names assert tg.groups.keys.dtype.names == ('a',) assert np.all(tg['a'].groups.indices == indices) # remove key column tg = t1.group_by('a') tg.remove_column('a') assert np.all(tg.groups.indices == indices) assert tg.groups.keys.dtype.names == ('a',) assert np.all(tg['b'].groups.indices == indices) # rename key column tg = t1.group_by('a') tg.rename_column('a', 'aa') assert np.all(tg.groups.indices == indices) assert tg.groups.keys.dtype.names == ('a',) assert np.all(tg['aa'].groups.indices == indices) def test_group_by_masked(T1): t1m = QTable(T1, masked=True) t1m['c'].mask[4] = True t1m['d'].mask[5] = True assert t1m.group_by('a').pformat() == [' a b c d q ', ' m ', '--- --- --- --- ---', ' 0 a -- 4 4.0', ' 1 b 3.0 -- 5.0', ' 1 a 2.0 6 6.0', ' 1 a 1.0 7 7.0', ' 2 c 7.0 0 0.0', ' 2 b 5.0 1 1.0', ' 2 b 6.0 2 2.0', ' 2 a 4.0 3 3.0'] def test_group_by_errors(T1): """ Appropriate errors get raised. """ # Bad column name as string with pytest.raises(ValueError): T1.group_by('f') # Bad column names in list with pytest.raises(ValueError): T1.group_by(['f', 'g']) # Wrong length array with pytest.raises(ValueError): T1.group_by(np.array([1, 2])) # Wrong type with pytest.raises(TypeError): T1.group_by(None) # Masked key column t1 = QTable(T1, masked=True) t1['a'].mask[4] = True with pytest.raises(ValueError): t1.group_by('a') def test_groups_keys_meta(T1): """ Make sure the keys meta['grouped_by_table_cols'] is working. 
""" # Group by column in this table tg = T1.group_by('a') assert tg.groups.keys.meta['grouped_by_table_cols'] is True assert tg['c'].groups.keys.meta['grouped_by_table_cols'] is True assert tg.groups[1].groups.keys.meta['grouped_by_table_cols'] is True assert (tg['d'].groups[np.array([False, True, True])] .groups.keys.meta['grouped_by_table_cols'] is True) # Group by external Table tg = T1.group_by(T1['a', 'b']) assert tg.groups.keys.meta['grouped_by_table_cols'] is False assert tg['c'].groups.keys.meta['grouped_by_table_cols'] is False assert tg.groups[1].groups.keys.meta['grouped_by_table_cols'] is False # Group by external numpy array tg = T1.group_by(T1['a', 'b'].as_array()) assert not hasattr(tg.groups.keys, 'meta') assert not hasattr(tg['c'].groups.keys, 'meta') # Group by Column tg = T1.group_by(T1['a']) assert 'grouped_by_table_cols' not in tg.groups.keys.meta assert 'grouped_by_table_cols' not in tg['c'].groups.keys.meta def test_table_aggregate(T1): """ Aggregate a table """ # Table with only summable cols t1 = T1['a', 'c', 'd'] tg = t1.group_by('a') tga = tg.groups.aggregate(np.sum) assert tga.pformat() == [' a c d ', '--- ---- ---', ' 0 0.0 4', ' 1 6.0 18', ' 2 22.0 6'] # Reverts to default groups assert np.all(tga.groups.indices == np.array([0, 3])) assert tga.groups.keys is None # metadata survives assert tga.meta['ta'] == 1 assert tga['c'].meta['a'] == 1 assert tga['c'].description == 'column c' # Aggregate with np.sum with masked elements. This results # in one group with no elements, hence a nan result and conversion # to float for the 'd' column. t1m = QTable(T1, masked=True) t1m['c'].mask[4:6] = True t1m['d'].mask[4:6] = True tg = t1m.group_by('a') with pytest.warns(UserWarning, match="converting a masked element to nan"): tga = tg.groups.aggregate(np.sum) assert tga.pformat() == [' a c d q ', ' m ', '--- ---- ---- ----', ' 0 nan nan 4.0', ' 1 3.0 13.0 18.0', ' 2 22.0 6.0 6.0'] # Aggregrate with np.sum with masked elements, but where every # group has at least one remaining (unmasked) element. Then # the int column stays as an int. t1m = QTable(t1, masked=True) t1m['c'].mask[5] = True t1m['d'].mask[5] = True tg = t1m.group_by('a') tga = tg.groups.aggregate(np.sum) assert tga.pformat() == [' a c d ', '--- ---- ---', ' 0 0.0 4', ' 1 3.0 13', ' 2 22.0 6'] # Aggregate with a column type that cannot by supplied to the aggregating # function. This raises a warning but still works. 
    tg = T1.group_by('a')
    with pytest.warns(AstropyUserWarning, match="Cannot aggregate column"):
        tga = tg.groups.aggregate(np.sum)
    assert tga.pformat() == [' a c d q ', ' m ', '--- ---- --- ----',
                             ' 0 0.0 4 4.0', ' 1 6.0 18 18.0', ' 2 22.0 6 6.0']


def test_table_aggregate_reduceat(T1):
    """
    Aggregate table with functions which have a reduceat method
    """
    # Comparison functions without reduceat
    def np_mean(x):
        return np.mean(x)

    def np_sum(x):
        return np.sum(x)

    # np.add is a binary ufunc, so the pass-through wrapper needs two
    # arguments (a one-argument ``np.add(x)`` call raises TypeError).
    def np_add(x1, x2):
        return np.add(x1, x2)

    # Table with only summable cols
    t1 = T1['a', 'c', 'd']
    tg = t1.group_by('a')

    # Comparison
    tga_r = tg.groups.aggregate(np.sum)
    tga_a = tg.groups.aggregate(np.add)
    tga_n = tg.groups.aggregate(np_sum)

    assert np.all(tga_r == tga_n)
    assert np.all(tga_a == tga_n)
    assert tga_n.pformat() == [' a c d ', '--- ---- ---',
                               ' 0 0.0 4', ' 1 6.0 18', ' 2 22.0 6']

    tga_r = tg.groups.aggregate(np.mean)
    tga_n = tg.groups.aggregate(np_mean)
    assert np.all(tga_r == tga_n)
    assert tga_n.pformat() == [' a c d ', '--- --- ---',
                               ' 0 0.0 4.0', ' 1 2.0 6.0', ' 2 5.5 1.5']

    # Binary ufunc np_add should raise warning without reduceat
    t2 = T1['a', 'c']
    tg = t2.group_by('a')
    with pytest.warns(AstropyUserWarning, match="Cannot aggregate column"):
        tga = tg.groups.aggregate(np_add)
    assert tga.pformat() == [' a ', '---', ' 0', ' 1', ' 2']


def test_column_aggregate(T1):
    """
    Aggregate a single table column
    """
    for masked in (False, True):
        tg = QTable(T1, masked=masked).group_by('a')
        tga = tg['c'].groups.aggregate(np.sum)
        assert tga.pformat() == [' c ', '----', ' 0.0', ' 6.0', '22.0']


@pytest.mark.skipif(not NUMPY_LT_1_22 and NUMPY_LT_1_22_1,
                    reason='https://github.com/numpy/numpy/issues/20699')
def test_column_aggregate_f8():
    """https://github.com/astropy/astropy/issues/12706"""
    # Just want to make sure it does not crash again.
for masked in (False, True): tg = Table({'a': np.arange(2, dtype='>f8')}, masked=masked).group_by('a') tga = tg['a'].groups.aggregate(np.sum) assert tga.pformat() == [' a ', '---', '0.0', '1.0'] def test_table_filter(): """ Table groups filtering """ def all_positive(table, key_colnames): colnames = [name for name in table.colnames if name not in key_colnames] for colname in colnames: if np.any(table[colname] < 0): return False return True # Negative value in 'a' column should not filter because it is a key col t = Table.read([' a c d', ' -2 7.0 0', ' -2 5.0 1', ' 0 0.0 4', ' 1 3.0 5', ' 1 2.0 -6', ' 1 1.0 7', ' 3 3.0 5', ' 3 -2.0 6', ' 3 1.0 7', ], format='ascii') tg = t.group_by('a') t2 = tg.groups.filter(all_positive) assert t2.groups[0].pformat() == [' a c d ', '--- --- ---', ' -2 7.0 0', ' -2 5.0 1'] assert t2.groups[1].pformat() == [' a c d ', '--- --- ---', ' 0 0.0 4'] def test_column_filter(): """ Table groups filtering """ def all_positive(column): if np.any(column < 0): return False return True # Negative value in 'a' column should not filter because it is a key col t = Table.read([' a c d', ' -2 7.0 0', ' -2 5.0 1', ' 0 0.0 4', ' 1 3.0 5', ' 1 2.0 -6', ' 1 1.0 7', ' 3 3.0 5', ' 3 -2.0 6', ' 3 1.0 7', ], format='ascii') tg = t.group_by('a') c2 = tg['c'].groups.filter(all_positive) assert len(c2.groups) == 3 assert c2.groups[0].pformat() == [' c ', '---', '7.0', '5.0'] assert c2.groups[1].pformat() == [' c ', '---', '0.0'] assert c2.groups[2].pformat() == [' c ', '---', '3.0', '2.0', '1.0'] def test_group_mixins(): """ Test grouping a table with mixin columns """ # Setup mixins idx = np.arange(4) x = np.array([3., 1., 2., 1.]) q = x * u.m lon = coordinates.Longitude(x * u.deg) lat = coordinates.Latitude(x * u.deg) # For Time do J2000.0 + few * 0.1 ns (this requires > 64 bit precision) tm = time.Time(2000, format='jyear') + time.TimeDelta(x * 1e-10, format='sec') sc = coordinates.SkyCoord(ra=lon, dec=lat) aw = table_helpers.ArrayWrapper(x) nd = np.array([(3, 'c'), (1, 'a'), (2, 'b'), (1, 'a')], dtype='<i4,|S1').view(NdarrayMixin) qt = QTable([idx, x, q, lon, lat, tm, sc, aw, nd], names=['idx', 'x', 'q', 'lon', 'lat', 'tm', 'sc', 'aw', 'nd']) # Test group_by with each supported mixin type mixin_keys = ['x', 'q', 'lon', 'lat', 'tm', 'sc', 'aw', 'nd'] for key in mixin_keys: qtg = qt.group_by(key) # Test that it got the sort order correct assert np.all(qtg['idx'] == [1, 3, 2, 0]) # Test that the groups are right # Note: skip testing SkyCoord column because that doesn't have equality for name in ['x', 'q', 'lon', 'lat', 'tm', 'aw', 'nd']: assert np.all(qt[name][[1, 3]] == qtg.groups[0][name]) assert np.all(qt[name][[2]] == qtg.groups[1][name]) assert np.all(qt[name][[0]] == qtg.groups[2][name]) # Test that unique also works with mixins since most of the work is # done with group_by(). This is using *every* mixin as key. 
uqt = unique(qt, keys=mixin_keys) assert len(uqt) == 3 assert np.all(uqt['idx'] == [1, 2, 0]) assert np.all(uqt['x'] == [1., 2., 3.]) # Column group_by() with mixins idxg = qt['idx'].group_by(qt[mixin_keys]) assert np.all(idxg == [1, 3, 2, 0]) @pytest.mark.parametrize( 'col', [time.TimeDelta([1, 2], format='sec'), time.Time([1, 2], format='cxcsec'), coordinates.SkyCoord([1, 2], [3, 4], unit='deg,deg')]) def test_group_mixins_unsupported(col): """Test that aggregating unsupported mixins produces a warning only""" t = Table([[1, 1], [3, 4], col], names=['a', 'b', 'mix']) tg = t.group_by('a') with pytest.warns(AstropyUserWarning, match="Cannot aggregate column 'mix'"): tg.groups.aggregate(np.sum)
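

# Illustrative sketch (not part of the original suite) of the group/aggregate
# pattern exercised throughout this file; the column names are made up.
def _demo_group_aggregate():
    t = Table({'key': [1, 1, 2], 'val': [10.0, 20.0, 30.0]})
    tg = t.group_by('key')  # rows sorted by 'key'; groups delimited by indices
    assert np.all(tg.groups.indices == [0, 2, 3])
    means = tg.groups.aggregate(np.mean)  # one output row per unique key
    assert np.all(means['val'] == [15.0, 30.0])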
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ All of the pytest fixtures used by astropy.table are defined here. `conftest.py` is a "special" module name for pytest that is always imported, but is not looked in for tests, and it is the recommended place to put fixtures that are shared between modules. These fixtures can not be defined in a module by a different name and still be shared between modules. """ from copy import deepcopy from collections import OrderedDict import pickle import pytest import numpy as np from astropy import table from astropy.table import Table, QTable from astropy.table.table_helpers import ArrayWrapper from astropy import time from astropy import units as u from astropy import coordinates from astropy.table import pprint @pytest.fixture(params=[table.Column, table.MaskedColumn]) def Column(request): # Fixture to run all the Column tests for both an unmasked (ndarray) # and masked (MaskedArray) column. return request.param class MaskedTable(table.Table): def __init__(self, *args, **kwargs): kwargs['masked'] = True table.Table.__init__(self, *args, **kwargs) class MyRow(table.Row): pass class MyColumn(table.Column): pass class MyMaskedColumn(table.MaskedColumn): pass class MyTableColumns(table.TableColumns): pass class MyTableFormatter(pprint.TableFormatter): pass class MyTable(table.Table): Row = MyRow Column = MyColumn MaskedColumn = MyMaskedColumn TableColumns = MyTableColumns TableFormatter = MyTableFormatter # Fixture to run all the Column tests for both an unmasked (ndarray) # and masked (MaskedArray) column. @pytest.fixture(params=['unmasked', 'masked', 'subclass']) def table_types(request): class TableTypes: def __init__(self, request): if request.param == 'unmasked': self.Table = table.Table self.Column = table.Column elif request.param == 'masked': self.Table = MaskedTable self.Column = table.MaskedColumn elif request.param == 'subclass': self.Table = MyTable self.Column = MyColumn return TableTypes(request) # Fixture to run all the Column tests for both an unmasked (ndarray) # and masked (MaskedArray) column. @pytest.fixture(params=[False, True]) def table_data(request): class TableData: def __init__(self, request): self.Table = MaskedTable if request.param else table.Table self.Column = table.MaskedColumn if request.param else table.Column self.COLS = [ self.Column(name='a', data=[1, 2, 3], description='da', format='%i', meta={'ma': 1}, unit='ua'), self.Column(name='b', data=[4, 5, 6], description='db', format='%d', meta={'mb': 1}, unit='ub'), self.Column(name='c', data=[7, 8, 9], description='dc', format='%f', meta={'mc': 1}, unit='ub')] self.DATA = self.Table(self.COLS) return TableData(request) class SubclassTable(table.Table): pass @pytest.fixture(params=[True, False]) def tableclass(request): return table.Table if request.param else SubclassTable @pytest.fixture(params=list(range(0, pickle.HIGHEST_PROTOCOL + 1))) def protocol(request): """ Fixture to run all the tests for all available pickle protocols. """ return request.param # Fixture to run all tests for both an unmasked (ndarray) and masked # (MaskedArray) column. @pytest.fixture(params=[False, True]) def table_type(request): return MaskedTable if request.param else table.Table # Stuff for testing mixin columns MIXIN_COLS = {'quantity': [0, 1, 2, 3] * u.m, 'longitude': coordinates.Longitude([0., 1., 5., 6.] * u.deg, wrap_angle=180. * u.deg), 'latitude': coordinates.Latitude([5., 6., 10., 11.] 
* u.deg), 'time': time.Time([2000, 2001, 2002, 2003], format='jyear'), 'timedelta': time.TimeDelta([1, 2, 3, 4], format='jd'), 'skycoord': coordinates.SkyCoord(ra=[0, 1, 2, 3] * u.deg, dec=[0, 1, 2, 3] * u.deg), 'sphericalrep': coordinates.SphericalRepresentation( [0, 1, 2, 3]*u.deg, [0, 1, 2, 3]*u.deg, 1*u.kpc), 'cartesianrep': coordinates.CartesianRepresentation( [0, 1, 2, 3]*u.pc, [4, 5, 6, 7]*u.pc, [9, 8, 8, 6]*u.pc), 'sphericaldiff': coordinates.SphericalCosLatDifferential( [0, 1, 2, 3]*u.mas/u.yr, [0, 1, 2, 3]*u.mas/u.yr, 10*u.km/u.s), 'arraywrap': ArrayWrapper([0, 1, 2, 3]), 'arrayswap': ArrayWrapper(np.arange(4, dtype='i').byteswap().newbyteorder()), 'ndarraylil': np.array([(7, 'a'), (8, 'b'), (9, 'c'), (9, 'c')], dtype='<i4,|S1').view(table.NdarrayMixin), 'ndarraybig': np.array([(7, 'a'), (8, 'b'), (9, 'c'), (9, 'c')], dtype='>i4,|S1').view(table.NdarrayMixin), } MIXIN_COLS['earthlocation'] = coordinates.EarthLocation( lon=MIXIN_COLS['longitude'], lat=MIXIN_COLS['latitude'], height=MIXIN_COLS['quantity']) MIXIN_COLS['sphericalrepdiff'] = coordinates.SphericalRepresentation( MIXIN_COLS['sphericalrep'], differentials=MIXIN_COLS['sphericaldiff']) @pytest.fixture(params=sorted(MIXIN_COLS)) def mixin_cols(request): """ Fixture to return a set of columns for mixin testing which includes an index column 'i', two string cols 'a', 'b' (for joins etc), and one of the available mixin column types. """ cols = OrderedDict() mixin_cols = deepcopy(MIXIN_COLS) cols['i'] = table.Column([0, 1, 2, 3], name='i') cols['a'] = table.Column(['a', 'b', 'b', 'c'], name='a') cols['b'] = table.Column(['b', 'c', 'a', 'd'], name='b') cols['m'] = mixin_cols[request.param] return cols @pytest.fixture(params=[False, True]) def T1(request): T = QTable.read([' a b c d', ' 2 c 7.0 0', ' 2 b 5.0 1', ' 2 b 6.0 2', ' 2 a 4.0 3', ' 0 a 0.0 4', ' 1 b 3.0 5', ' 1 a 2.0 6', ' 1 a 1.0 7', ], format='ascii') T['q'] = np.arange(len(T)) * u.m T.meta.update({'ta': 1}) T['c'].meta.update({'a': 1}) T['c'].description = 'column c' if request.param: T.add_index('a') return T @pytest.fixture(params=[Table, QTable]) def operation_table_type(request): return request.param
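

# Usage sketch (illustrative, not part of the original conftest): any test in
# this package can request the fixtures above by name, e.g.
#
#     def test_example(T1):              # hypothetical test
#         tg = T1.group_by('a')
#         assert len(tg.groups) == 3     # 'a' has the unique values 0, 1, 2
#
# pytest runs such a test once per ``T1`` param, i.e. both with and without
# an index on column 'a'.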
# Licensed under a 3-clause BSD style license - see LICENSE.rst from astropy.table.table_helpers import ArrayWrapper from astropy.coordinates.earth import EarthLocation from astropy.units.quantity import Quantity from collections import OrderedDict from contextlib import nullcontext import pytest import numpy as np from astropy.table import Table, QTable, TableMergeError, Column, MaskedColumn, NdarrayMixin from astropy.table.operations import _get_out_class, join_skycoord, join_distance from astropy import units as u from astropy.utils import metadata from astropy.utils.metadata import MergeConflictError from astropy import table from astropy.time import Time, TimeDelta from astropy.coordinates import (SkyCoord, SphericalRepresentation, UnitSphericalRepresentation, CartesianRepresentation, BaseRepresentationOrDifferential, search_around_3d) from astropy.coordinates.tests.test_representation import representation_equal from astropy.coordinates.tests.helper import skycoord_equal from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa def sort_eq(list1, list2): return sorted(list1) == sorted(list2) def check_mask(col, exp_mask): """Check that col.mask == exp_mask""" if hasattr(col, 'mask'): # Coerce expected mask into dtype of col.mask. In particular this is # needed for types like EarthLocation where the mask is a structured # array. exp_mask = np.array(exp_mask).astype(col.mask.dtype) out = np.all(col.mask == exp_mask) else: # With no mask the check is OK if all the expected mask values # are False (i.e. no auto-conversion to MaskedQuantity if it was # not required by the join). out = np.all(exp_mask == False) return out class TestJoin(): def _setup(self, t_cls=Table): lines1 = [' a b c ', ' 0 foo L1', ' 1 foo L2', ' 1 bar L3', ' 2 bar L4'] lines2 = [' a b d ', ' 1 foo R1', ' 1 foo R2', ' 2 bar R3', ' 4 bar R4'] self.t1 = t_cls.read(lines1, format='ascii') self.t2 = t_cls.read(lines2, format='ascii') self.t3 = t_cls(self.t2, copy=True) self.t1.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)])) self.t2.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])) self.t3.meta.update(OrderedDict([('b', 3), ('c', [1, 2]), ('d', 2), ('a', 1)])) self.meta_merge = OrderedDict([('b', [1, 2, 3, 4]), ('c', {'a': 1, 'b': 1}), ('d', 1), ('a', 1)]) def test_table_meta_merge(self, operation_table_type): self._setup(operation_table_type) out = table.join(self.t1, self.t2, join_type='inner') assert out.meta == self.meta_merge def test_table_meta_merge_conflict(self, operation_table_type): self._setup(operation_table_type) with pytest.warns(metadata.MergeConflictWarning) as w: out = table.join(self.t1, self.t3, join_type='inner') assert len(w) == 3 assert out.meta == self.t3.meta with pytest.warns(metadata.MergeConflictWarning) as w: out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='warn') assert len(w) == 3 assert out.meta == self.t3.meta out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='silent') assert out.meta == self.t3.meta with pytest.raises(MergeConflictError): out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='error') with pytest.raises(ValueError): out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='nonsense') def test_both_unmasked_inner(self, operation_table_type): self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 # Basic join with default parameters (inner join on common keys) t12 = table.join(t1, t2) assert type(t12) is operation_table_type assert 
type(t12['a']) is type(t1['a']) # noqa assert type(t12['b']) is type(t1['b']) # noqa assert type(t12['c']) is type(t1['c']) # noqa assert type(t12['d']) is type(t2['d']) # noqa assert t12.masked is False assert sort_eq(t12.pformat(), [' a b c d ', '--- --- --- ---', ' 1 foo L2 R1', ' 1 foo L2 R2', ' 2 bar L4 R3']) # Table meta merged properly assert t12.meta == self.meta_merge def test_both_unmasked_left_right_outer(self, operation_table_type): if operation_table_type is QTable: pytest.xfail('Quantity columns do not support masking.') self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 # Left join t12 = table.join(t1, t2, join_type='left') assert t12.has_masked_columns is True assert t12.masked is False for name in ('a', 'b', 'c'): assert type(t12[name]) is Column assert type(t12['d']) is MaskedColumn assert sort_eq(t12.pformat(), [' a b c d ', '--- --- --- ---', ' 0 foo L1 --', ' 1 bar L3 --', ' 1 foo L2 R1', ' 1 foo L2 R2', ' 2 bar L4 R3']) # Right join t12 = table.join(t1, t2, join_type='right') assert t12.has_masked_columns is True assert t12.masked is False assert sort_eq(t12.pformat(), [' a b c d ', '--- --- --- ---', ' 1 foo L2 R1', ' 1 foo L2 R2', ' 2 bar L4 R3', ' 4 bar -- R4']) # Outer join t12 = table.join(t1, t2, join_type='outer') assert t12.has_masked_columns is True assert t12.masked is False assert sort_eq(t12.pformat(), [' a b c d ', '--- --- --- ---', ' 0 foo L1 --', ' 1 bar L3 --', ' 1 foo L2 R1', ' 1 foo L2 R2', ' 2 bar L4 R3', ' 4 bar -- R4']) # Check that the common keys are 'a', 'b' t12a = table.join(t1, t2, join_type='outer') t12b = table.join(t1, t2, join_type='outer', keys=['a', 'b']) assert np.all(t12a.as_array() == t12b.as_array()) def test_both_unmasked_single_key_inner(self, operation_table_type): self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 # Inner join on 'a' column t12 = table.join(t1, t2, keys='a') assert type(t12) is operation_table_type assert type(t12['a']) is type(t1['a']) # noqa assert type(t12['b_1']) is type(t1['b']) # noqa assert type(t12['c']) is type(t1['c']) # noqa assert type(t12['b_2']) is type(t2['b']) # noqa assert type(t12['d']) is type(t2['d']) # noqa assert t12.masked is False assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ', '--- --- --- --- ---', ' 1 foo L2 foo R1', ' 1 foo L2 foo R2', ' 1 bar L3 foo R1', ' 1 bar L3 foo R2', ' 2 bar L4 bar R3']) def test_both_unmasked_single_key_left_right_outer(self, operation_table_type): if operation_table_type is QTable: pytest.xfail('Quantity columns do not support masking.') self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 # Left join t12 = table.join(t1, t2, join_type='left', keys='a') assert t12.has_masked_columns is True assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ', '--- --- --- --- ---', ' 0 foo L1 -- --', ' 1 foo L2 foo R1', ' 1 foo L2 foo R2', ' 1 bar L3 foo R1', ' 1 bar L3 foo R2', ' 2 bar L4 bar R3']) # Right join t12 = table.join(t1, t2, join_type='right', keys='a') assert t12.has_masked_columns is True assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ', '--- --- --- --- ---', ' 1 foo L2 foo R1', ' 1 foo L2 foo R2', ' 1 bar L3 foo R1', ' 1 bar L3 foo R2', ' 2 bar L4 bar R3', ' 4 -- -- bar R4']) # Outer join t12 = table.join(t1, t2, join_type='outer', keys='a') assert t12.has_masked_columns is True assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ', '--- --- --- --- ---', ' 0 foo L1 -- --', ' 1 foo L2 foo R1', ' 1 foo L2 foo R2', ' 1 bar L3 foo R1', ' 1 bar L3 foo R2', ' 2 bar L4 bar R3', ' 4 -- -- bar R4']) def test_masked_unmasked(self, 
operation_table_type):
        if operation_table_type is QTable:
            pytest.xfail('Quantity columns do not support masking.')
        self._setup(operation_table_type)
        t1 = self.t1
        t1m = operation_table_type(self.t1, masked=True)
        t2 = self.t2

        # Result table is never masked
        t1m2 = table.join(t1m, t2, join_type='inner')
        assert t1m2.masked is False

        # Result should match non-masked result
        t12 = table.join(t1, t2)
        assert np.all(t12.as_array() == np.array(t1m2))

        # Mask out some values in left table and make sure they propagate
        t1m['b'].mask[1] = True
        t1m['c'].mask[2] = True
        t1m2 = table.join(t1m, t2, join_type='inner', keys='a')
        assert sort_eq(t1m2.pformat(), [' a b_1 c b_2 d ', '--- --- --- --- ---',
                                        ' 1 -- L2 foo R1', ' 1 -- L2 foo R2',
                                        ' 1 bar -- foo R1', ' 1 bar -- foo R2',
                                        ' 2 bar L4 bar R3'])

        t21m = table.join(t2, t1m, join_type='inner', keys='a')
        assert sort_eq(t21m.pformat(), [' a b_1 d b_2 c ', '--- --- --- --- ---',
                                        ' 1 foo R2 -- L2', ' 1 foo R2 bar --',
                                        ' 1 foo R1 -- L2', ' 1 foo R1 bar --',
                                        ' 2 bar R3 bar L4'])

    def test_masked_masked(self, operation_table_type):
        self._setup(operation_table_type)
        """Two masked tables"""
        if operation_table_type is QTable:
            pytest.xfail('Quantity columns do not support masking.')
        t1 = self.t1
        t1m = operation_table_type(self.t1, masked=True)
        t2 = self.t2
        t2m = operation_table_type(self.t2, masked=True)

        # Result table is never masked but original column types are preserved
        t1m2m = table.join(t1m, t2m, join_type='inner')
        assert t1m2m.masked is False
        for col in t1m2m.itercols():
            assert type(col) is MaskedColumn

        # Result should match non-masked result
        t12 = table.join(t1, t2)
        assert np.all(t12.as_array() == np.array(t1m2m))

        # Mask out some values in both tables and make sure they propagate
        t1m['b'].mask[1] = True
        t1m['c'].mask[2] = True
        t2m['d'].mask[2] = True
        t1m2m = table.join(t1m, t2m, join_type='inner', keys='a')
        assert sort_eq(t1m2m.pformat(), [' a b_1 c b_2 d ', '--- --- --- --- ---',
                                         ' 1 -- L2 foo R1', ' 1 -- L2 foo R2',
                                         ' 1 bar -- foo R1', ' 1 bar -- foo R2',
                                         ' 2 bar L4 bar --'])

    def test_classes(self):
        """Ensure that classes and subclasses get through as expected"""
        class MyCol(Column):
            pass

        class MyMaskedCol(MaskedColumn):
            pass

        t1 = Table()
        t1['a'] = MyCol([1])
        t1['b'] = MyCol([2])
        t1['c'] = MyMaskedCol([3])

        t2 = Table()
        t2['a'] = Column([1, 2])
        t2['d'] = MyCol([3, 4])
        t2['e'] = MyMaskedCol([5, 6])

        t12 = table.join(t1, t2, join_type='inner')
        # Note the parentheses: ``type(t12[name]) is exp_type`` is the intended
        # check; ``type(t12[name] is exp_type)`` is always truthy (it is the
        # bool type) and would make these asserts vacuous.
        for name, exp_type in (('a', MyCol), ('b', MyCol), ('c', MyMaskedCol),
                               ('d', MyCol), ('e', MyMaskedCol)):
            assert type(t12[name]) is exp_type

        t21 = table.join(t2, t1, join_type='left')
        # Note col 'b' gets upgraded from MyCol to MaskedColumn since it needs to be
        # masked, but col 'c' stays since MyMaskedCol supports masking.
        for name, exp_type in (('a', MyCol), ('b', MaskedColumn), ('c', MyMaskedCol),
                               ('d', MyCol), ('e', MyMaskedCol)):
            assert type(t21[name]) is exp_type

    def test_col_rename(self, operation_table_type):
        self._setup(operation_table_type)
        """
        Test auto col renaming when there is a conflict. Use
        non-default values of uniq_col_name and table_names.
""" t1 = self.t1 t2 = self.t2 t12 = table.join(t1, t2, uniq_col_name='x_{table_name}_{col_name}_y', table_names=['L', 'R'], keys='a') assert t12.colnames == ['a', 'x_L_b_y', 'c', 'x_R_b_y', 'd'] def test_rename_conflict(self, operation_table_type): self._setup(operation_table_type) """ Test that auto-column rename fails because of a conflict with an existing column """ t1 = self.t1 t2 = self.t2 t1['b_1'] = 1 # Add a new column b_1 that will conflict with auto-rename with pytest.raises(TableMergeError): table.join(t1, t2, keys='a') def test_missing_keys(self, operation_table_type): self._setup(operation_table_type) """Merge on a key column that doesn't exist""" t1 = self.t1 t2 = self.t2 with pytest.raises(TableMergeError): table.join(t1, t2, keys=['a', 'not there']) def test_bad_join_type(self, operation_table_type): self._setup(operation_table_type) """Bad join_type input""" t1 = self.t1 t2 = self.t2 with pytest.raises(ValueError): table.join(t1, t2, join_type='illegal value') def test_no_common_keys(self, operation_table_type): self._setup(operation_table_type) """Merge tables with no common keys""" t1 = self.t1 t2 = self.t2 del t1['a'] del t1['b'] del t2['a'] del t2['b'] with pytest.raises(TableMergeError): table.join(t1, t2) def test_masked_key_column(self, operation_table_type): self._setup(operation_table_type) """Merge on a key column that has a masked element""" if operation_table_type is QTable: pytest.xfail('Quantity columns do not support masking.') t1 = self.t1 t2 = operation_table_type(self.t2, masked=True) table.join(t1, t2) # OK t2['a'].mask[0] = True with pytest.raises(TableMergeError): table.join(t1, t2) def test_col_meta_merge(self, operation_table_type): self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 t2.rename_column('d', 'c') # force col conflict and renaming meta1 = OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]) meta2 = OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]) # Key col 'a', should first value ('cm') t1['a'].unit = 'cm' t2['a'].unit = 'm' # Key col 'b', take first value 't1_b' t1['b'].info.description = 't1_b' # Key col 'b', take first non-empty value 't1_b' t2['b'].info.format = '%6s' # Key col 'a', should be merged meta t1['a'].info.meta = meta1 t2['a'].info.meta = meta2 # Key col 'b', should be meta2 t2['b'].info.meta = meta2 # All these should pass through t1['c'].info.format = '%3s' t1['c'].info.description = 't1_c' t2['c'].info.format = '%6s' t2['c'].info.description = 't2_c' if operation_table_type is Table: ctx = pytest.warns(metadata.MergeConflictWarning, match=r"In merged column 'a' the 'unit' attribute does not match \(cm != m\)") # noqa else: ctx = nullcontext() with ctx: t12 = table.join(t1, t2, keys=['a', 'b']) assert t12['a'].unit == 'm' assert t12['b'].info.description == 't1_b' assert t12['b'].info.format == '%6s' assert t12['a'].info.meta == self.meta_merge assert t12['b'].info.meta == meta2 assert t12['c_1'].info.format == '%3s' assert t12['c_1'].info.description == 't1_c' assert t12['c_2'].info.format == '%6s' assert t12['c_2'].info.description == 't2_c' def test_join_multidimensional(self, operation_table_type): self._setup(operation_table_type) # Regression test for #2984, which was an issue where join did not work # on multi-dimensional columns. 
t1 = operation_table_type() t1['a'] = [1, 2, 3] t1['b'] = np.ones((3, 4)) t2 = operation_table_type() t2['a'] = [1, 2, 3] t2['c'] = [4, 5, 6] t3 = table.join(t1, t2) np.testing.assert_allclose(t3['a'], t1['a']) np.testing.assert_allclose(t3['b'], t1['b']) np.testing.assert_allclose(t3['c'], t2['c']) def test_join_multidimensional_masked(self, operation_table_type): self._setup(operation_table_type) """ Test for outer join with multidimensional columns where masking is required. (Issue #4059). """ if operation_table_type is QTable: pytest.xfail('Quantity columns do not support masking.') a = table.MaskedColumn([1, 2, 3], name='a') a2 = table.Column([1, 3, 4], name='a') b = table.MaskedColumn([[1, 2], [3, 4], [5, 6]], name='b', mask=[[1, 0], [0, 1], [0, 0]]) c = table.Column([[1, 1], [2, 2], [3, 3]], name='c') t1 = operation_table_type([a, b]) t2 = operation_table_type([a2, c]) t12 = table.join(t1, t2, join_type='inner') assert np.all(t12['b'].mask == [[True, False], [False, False]]) assert not hasattr(t12['c'], 'mask') t12 = table.join(t1, t2, join_type='outer') assert np.all(t12['b'].mask == [[True, False], [False, True], [False, False], [True, True]]) assert np.all(t12['c'].mask == [[False, False], [True, True], [False, False], [False, False]]) def test_mixin_functionality(self, mixin_cols): col = mixin_cols['m'] cls_name = type(col).__name__ len_col = len(col) idx = np.arange(len_col) t1 = table.QTable([idx, col], names=['idx', 'm1']) t2 = table.QTable([idx, col], names=['idx', 'm2']) # Set up join mismatches for different join_type cases t1 = t1[[0, 1, 3]] t2 = t2[[0, 2, 3]] # Test inner join, which works for all mixin_cols out = table.join(t1, t2, join_type='inner') assert len(out) == 2 assert out['m2'].__class__ is col.__class__ assert np.all(out['idx'] == [0, 3]) if cls_name == 'SkyCoord': # SkyCoord doesn't support __eq__ so use our own assert skycoord_equal(out['m1'], col[[0, 3]]) assert skycoord_equal(out['m2'], col[[0, 3]]) elif 'Repr' in cls_name or 'Diff' in cls_name: assert np.all(representation_equal(out['m1'], col[[0, 3]])) assert np.all(representation_equal(out['m2'], col[[0, 3]])) else: assert np.all(out['m1'] == col[[0, 3]]) assert np.all(out['m2'] == col[[0, 3]]) # Check for left, right, outer join which requires masking. Works for # the listed mixins classes. 
if isinstance(col, (Quantity, Time, TimeDelta)): out = table.join(t1, t2, join_type='left') assert len(out) == 3 assert np.all(out['idx'] == [0, 1, 3]) assert np.all(out['m1'] == t1['m1']) assert np.all(out['m2'] == t2['m2']) check_mask(out['m1'], [False, False, False]) check_mask(out['m2'], [False, True, False]) out = table.join(t1, t2, join_type='right') assert len(out) == 3 assert np.all(out['idx'] == [0, 2, 3]) assert np.all(out['m1'] == t1['m1']) assert np.all(out['m2'] == t2['m2']) check_mask(out['m1'], [False, True, False]) check_mask(out['m2'], [False, False, False]) out = table.join(t1, t2, join_type='outer') assert len(out) == 4 assert np.all(out['idx'] == [0, 1, 2, 3]) assert np.all(out['m1'] == col) assert np.all(out['m2'] == col) assert check_mask(out['m1'], [False, False, True, False]) assert check_mask(out['m2'], [False, True, False, False]) else: # Otherwise make sure it fails with the right exception message for join_type in ('outer', 'left', 'right'): with pytest.raises(NotImplementedError) as err: table.join(t1, t2, join_type=join_type) assert ('join requires masking' in str(err.value) or 'join unavailable' in str(err.value)) def test_cartesian_join(self, operation_table_type): t1 = Table(rows=[(1, 'a'), (2, 'b')], names=['a', 'b']) t2 = Table(rows=[(3, 'c'), (4, 'd')], names=['a', 'c']) t12 = table.join(t1, t2, join_type='cartesian') assert t1.colnames == ['a', 'b'] assert t2.colnames == ['a', 'c'] assert len(t12) == len(t1) * len(t2) assert str(t12).splitlines() == [ 'a_1 b a_2 c ', '--- --- --- ---', ' 1 a 3 c', ' 1 a 4 d', ' 2 b 3 c', ' 2 b 4 d'] with pytest.raises(ValueError, match='cannot supply keys for a cartesian join'): t12 = table.join(t1, t2, join_type='cartesian', keys='a') @pytest.mark.skipif('not HAS_SCIPY') def test_join_with_join_skycoord_sky(self): sc1 = SkyCoord([0, 1, 1.1, 2], [0, 0, 0, 0], unit='deg') sc2 = SkyCoord([0.5, 1.05, 2.1], [0, 0, 0], unit='deg') t1 = Table([sc1], names=['sc']) t2 = Table([sc2], names=['sc']) t12 = table.join(t1, t2, join_funcs={'sc': join_skycoord(0.2 * u.deg)}) exp = ['sc_id sc_1 sc_2 ', ' deg,deg deg,deg ', '----- ------- --------', ' 1 1.0,0.0 1.05,0.0', ' 1 1.1,0.0 1.05,0.0', ' 2 2.0,0.0 2.1,0.0'] assert str(t12).splitlines() == exp @pytest.mark.skipif('not HAS_SCIPY') @pytest.mark.parametrize('distance_func', ['search_around_3d', search_around_3d]) def test_join_with_join_skycoord_3d(self, distance_func): sc1 = SkyCoord([0, 1, 1.1, 2]*u.deg, [0, 0, 0, 0]*u.deg, [1, 1, 2, 1]*u.m) sc2 = SkyCoord([0.5, 1.05, 2.1]*u.deg, [0, 0, 0]*u.deg, [1, 1, 1]*u.m) t1 = Table([sc1], names=['sc']) t2 = Table([sc2], names=['sc']) join_func = join_skycoord(np.deg2rad(0.2) * u.m, distance_func=distance_func) t12 = table.join(t1, t2, join_funcs={'sc': join_func}) exp = ['sc_id sc_1 sc_2 ', ' deg,deg,m deg,deg,m ', '----- ----------- ------------', ' 1 1.0,0.0,1.0 1.05,0.0,1.0', ' 2 2.0,0.0,1.0 2.1,0.0,1.0'] assert str(t12).splitlines() == exp @pytest.mark.skipif('not HAS_SCIPY') def test_join_with_join_distance_1d(self): c1 = [0, 1, 1.1, 2] c2 = [0.5, 1.05, 2.1] t1 = Table([c1], names=['col']) t2 = Table([c2], names=['col']) join_func = join_distance(0.2, kdtree_args={'leafsize': 32}, query_args={'p': 2}) t12 = table.join(t1, t2, join_type='outer', join_funcs={'col': join_func}) exp = ['col_id col_1 col_2', '------ ----- -----', ' 1 1.0 1.05', ' 1 1.1 1.05', ' 2 2.0 2.1', ' 3 0.0 --', ' 4 -- 0.5'] assert str(t12).splitlines() == exp @pytest.mark.skipif('not HAS_SCIPY') def test_join_with_join_distance_1d_multikey(self): from 
astropy.table.operations import _apply_join_funcs c1 = [0, 1, 1.1, 1.2, 2] id1 = [0, 1, 2, 2, 3] o1 = ['a', 'b', 'c', 'd', 'e'] c2 = [0.5, 1.05, 2.1] id2 = [0, 2, 4] o2 = ['z', 'y', 'x'] t1 = Table([c1, id1, o1], names=['col', 'id', 'o1']) t2 = Table([c2, id2, o2], names=['col', 'id', 'o2']) join_func = join_distance(0.2) join_funcs = {'col': join_func} t12 = table.join(t1, t2, join_type='outer', join_funcs=join_funcs) exp = ['col_id col_1 id o1 col_2 o2', '------ ----- --- --- ----- ---', ' 1 1.0 1 b -- --', ' 1 1.1 2 c 1.05 y', ' 1 1.2 2 d 1.05 y', ' 2 2.0 3 e -- --', ' 2 -- 4 -- 2.1 x', ' 3 0.0 0 a -- --', ' 4 -- 0 -- 0.5 z'] assert str(t12).splitlines() == exp left, right, keys = _apply_join_funcs(t1, t2, ('col', 'id'), join_funcs) assert keys == ('col_id', 'id') @pytest.mark.skipif('not HAS_SCIPY') def test_join_with_join_distance_1d_quantity(self): c1 = [0, 1, 1.1, 2] * u.m c2 = [500, 1050, 2100] * u.mm t1 = QTable([c1], names=['col']) t2 = QTable([c2], names=['col']) join_func = join_distance(20 * u.cm) t12 = table.join(t1, t2, join_funcs={'col': join_func}) exp = ['col_id col_1 col_2 ', ' m mm ', '------ ----- ------', ' 1 1.0 1050.0', ' 1 1.1 1050.0', ' 2 2.0 2100.0'] assert str(t12).splitlines() == exp # Generate column name conflict t2['col_id'] = [0, 0, 0] t2['col__id'] = [0, 0, 0] t12 = table.join(t1, t2, join_funcs={'col': join_func}) exp = ['col___id col_1 col_2 col_id col__id', ' m mm ', '-------- ----- ------ ------ -------', ' 1 1.0 1050.0 0 0', ' 1 1.1 1050.0 0 0', ' 2 2.0 2100.0 0 0'] assert str(t12).splitlines() == exp @pytest.mark.skipif('not HAS_SCIPY') def test_join_with_join_distance_2d(self): c1 = np.array([[0, 1, 1.1, 2], [0, 0, 1, 0]]).transpose() c2 = np.array([[0.5, 1.05, 2.1], [0, 0, 0]]).transpose() t1 = Table([c1], names=['col']) t2 = Table([c2], names=['col']) join_func = join_distance(0.2, kdtree_args={'leafsize': 32}, query_args={'p': 2}) t12 = table.join(t1, t2, join_type='outer', join_funcs={'col': join_func}) exp = ['col_id col_1 col_2 ', f'{t12["col_id"].dtype.name} float64[2] float64[2]', # int32 or int64 '------ ---------- -----------', ' 1 1.0 .. 0.0 1.05 .. 0.0', ' 2 2.0 .. 0.0 2.1 .. 0.0', ' 3 0.0 .. 0.0 -- .. --', ' 4 1.1 .. 1.0 -- .. --', ' 5 -- .. -- 0.5 .. 0.0'] assert t12.pformat(show_dtype=True) == exp def test_keys_left_right_basic(self): """Test using the keys_left and keys_right args to specify different join keys. This takes the standard test case but renames column 'a' to 'x' and 'y' respectively for tables 1 and 2. 
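(Illustratively, join(t1, t2, keys_left='x', keys_right='y') matches rows
where t1['x'] == t2['y'].)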
Then it compares the normal join on 'a' to the new join on 'x' and 'y'.""" self._setup() for join_type in ('inner', 'left', 'right', 'outer'): t1 = self.t1.copy() t2 = self.t2.copy() # Expected is same as joining on 'a' but with names 'x', 'y' instead t12_exp = table.join(t1, t2, keys='a', join_type=join_type) t12_exp.add_column(t12_exp['a'], name='x', index=1) t12_exp.add_column(t12_exp['a'], name='y', index=len(t1.colnames) + 1) del t12_exp['a'] # Different key names t1.rename_column('a', 'x') t2.rename_column('a', 'y') keys_left_list = ['x'] # Test string key name keys_right_list = [['y']] # Test list of string key names if join_type == 'outer': # Just do this for the outer join (others are the same) keys_left_list.append([t1['x'].tolist()]) # Test list key column keys_right_list.append([t2['y']]) # Test Column key column for keys_left, keys_right in zip(keys_left_list, keys_right_list): t12 = table.join(t1, t2, keys_left=keys_left, keys_right=keys_right, join_type=join_type) assert t12.colnames == t12_exp.colnames for col in t12.values_equal(t12_exp).itercols(): assert np.all(col) assert t12_exp.meta == t12.meta def test_keys_left_right_exceptions(self): """Test exceptions using the keys_left and keys_right args to specify different join keys. """ self._setup() t1 = self.t1 t2 = self.t2 msg = r"left table does not have key column 'z'" with pytest.raises(ValueError, match=msg): table.join(t1, t2, keys_left='z', keys_right=['a']) msg = r"left table has different length from key \[1, 2\]" with pytest.raises(ValueError, match=msg): table.join(t1, t2, keys_left=[[1, 2]], keys_right=['a']) msg = r"keys arg must be None if keys_left and keys_right are supplied" with pytest.raises(ValueError, match=msg): table.join(t1, t2, keys_left='z', keys_right=['a'], keys='a') msg = r"keys_left and keys_right args must have same length" with pytest.raises(ValueError, match=msg): table.join(t1, t2, keys_left=['a', 'b'], keys_right=['a']) msg = r"keys_left and keys_right must both be provided" with pytest.raises(ValueError, match=msg): table.join(t1, t2, keys_left=['a', 'b']) msg = r"cannot supply join_funcs arg and keys_left / keys_right" with pytest.raises(ValueError, match=msg): table.join(t1, t2, keys_left=['a'], keys_right=['a'], join_funcs={}) class TestSetdiff(): def _setup(self, t_cls=Table): lines1 = [' a b ', ' 0 foo ', ' 1 foo ', ' 1 bar ', ' 2 bar '] lines2 = [' a b ', ' 0 foo ', ' 3 foo ', ' 4 bar ', ' 2 bar '] lines3 = [' a b d ', ' 0 foo R1', ' 8 foo R2', ' 1 bar R3', ' 4 bar R4'] self.t1 = t_cls.read(lines1, format='ascii') self.t2 = t_cls.read(lines2, format='ascii') self.t3 = t_cls.read(lines3, format='ascii') def test_default_same_columns(self, operation_table_type): self._setup(operation_table_type) out = table.setdiff(self.t1, self.t2) assert type(out['a']) is type(self.t1['a']) # noqa assert type(out['b']) is type(self.t1['b']) # noqa assert out.pformat() == [' a b ', '--- ---', ' 1 bar', ' 1 foo'] def test_default_same_tables(self, operation_table_type): self._setup(operation_table_type) out = table.setdiff(self.t1, self.t1) assert type(out['a']) is type(self.t1['a']) # noqa assert type(out['b']) is type(self.t1['b']) # noqa assert out.pformat() == [' a b ', '--- ---'] def test_extra_col_left_table(self, operation_table_type): self._setup(operation_table_type) with pytest.raises(ValueError): table.setdiff(self.t3, self.t1) def test_extra_col_right_table(self, operation_table_type): self._setup(operation_table_type) out = table.setdiff(self.t1, self.t3) assert type(out['a']) is 
type(self.t1['a']) # noqa assert type(out['b']) is type(self.t1['b']) # noqa assert out.pformat() == [' a b ', '--- ---', ' 1 foo', ' 2 bar'] def test_keys(self, operation_table_type): self._setup(operation_table_type) out = table.setdiff(self.t3, self.t1, keys=['a', 'b']) assert type(out['a']) is type(self.t1['a']) # noqa assert type(out['b']) is type(self.t1['b']) # noqa assert out.pformat() == [' a b d ', '--- --- ---', ' 4 bar R4', ' 8 foo R2'] def test_missing_key(self, operation_table_type): self._setup(operation_table_type) with pytest.raises(ValueError): table.setdiff(self.t3, self.t1, keys=['a', 'd']) class TestVStack(): def _setup(self, t_cls=Table): self.t1 = t_cls.read([' a b', ' 0. foo', ' 1. bar'], format='ascii') self.t2 = t_cls.read([' a b c', ' 2. pez 4', ' 3. sez 5'], format='ascii') self.t3 = t_cls.read([' a b', ' 4. 7', ' 5. 8', ' 6. 9'], format='ascii') self.t4 = t_cls(self.t1, copy=True, masked=t_cls is Table) # The following table has meta-data that conflicts with t1 self.t5 = t_cls(self.t1, copy=True) self.t1.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)])) self.t2.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])) self.t4.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)])) self.t5.meta.update(OrderedDict([('b', 3), ('c', 'k'), ('d', 1)])) self.meta_merge = OrderedDict([('b', [1, 2, 3, 4, 5, 6]), ('c', {'a': 1, 'b': 1, 'c': 1}), ('d', 1), ('a', 1), ('e', 1)]) def test_validate_join_type(self): self._setup() with pytest.raises(TypeError, match='Did you accidentally call vstack'): table.vstack(self.t1, self.t2) def test_stack_rows(self, operation_table_type): self._setup(operation_table_type) t2 = self.t1.copy() t2.meta.clear() out = table.vstack([self.t1, t2[1]]) assert type(out['a']) is type(self.t1['a']) # noqa assert type(out['b']) is type(self.t1['b']) # noqa assert out.pformat() == [' a b ', '--- ---', '0.0 foo', '1.0 bar', '1.0 bar'] def test_stack_table_column(self, operation_table_type): self._setup(operation_table_type) t2 = self.t1.copy() t2.meta.clear() out = table.vstack([self.t1, t2['a']]) assert out.masked is False assert out.pformat() == [' a b ', '--- ---', '0.0 foo', '1.0 bar', '0.0 --', '1.0 --'] def test_table_meta_merge(self, operation_table_type): self._setup(operation_table_type) out = table.vstack([self.t1, self.t2, self.t4], join_type='inner') assert out.meta == self.meta_merge def test_table_meta_merge_conflict(self, operation_table_type): self._setup(operation_table_type) with pytest.warns(metadata.MergeConflictWarning) as w: out = table.vstack([self.t1, self.t5], join_type='inner') assert len(w) == 2 assert out.meta == self.t5.meta with pytest.warns(metadata.MergeConflictWarning) as w: out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='warn') assert len(w) == 2 assert out.meta == self.t5.meta out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='silent') assert out.meta == self.t5.meta with pytest.raises(MergeConflictError): out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='error') with pytest.raises(ValueError): out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='nonsense') def test_bad_input_type(self, operation_table_type): self._setup(operation_table_type) with pytest.raises(ValueError): table.vstack([]) with pytest.raises(TypeError): table.vstack(1) with pytest.raises(TypeError): table.vstack([self.t2, 1]) with pytest.raises(ValueError): table.vstack([self.t1, self.t2], 
join_type='invalid join type') def test_stack_basic_inner(self, operation_table_type): self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 t4 = self.t4 t12 = table.vstack([t1, t2], join_type='inner') assert t12.masked is False assert type(t12) is operation_table_type assert type(t12['a']) is type(t1['a']) # noqa assert type(t12['b']) is type(t1['b']) # noqa assert t12.pformat() == [' a b ', '--- ---', '0.0 foo', '1.0 bar', '2.0 pez', '3.0 sez'] t124 = table.vstack([t1, t2, t4], join_type='inner') assert type(t124) is operation_table_type assert type(t12['a']) is type(t1['a']) # noqa assert type(t12['b']) is type(t1['b']) # noqa assert t124.pformat() == [' a b ', '--- ---', '0.0 foo', '1.0 bar', '2.0 pez', '3.0 sez', '0.0 foo', '1.0 bar'] def test_stack_basic_outer(self, operation_table_type): if operation_table_type is QTable: pytest.xfail('Quantity columns do not support masking.') self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 t4 = self.t4 t12 = table.vstack([t1, t2], join_type='outer') assert t12.masked is False assert t12.pformat() == [' a b c ', '--- --- ---', '0.0 foo --', '1.0 bar --', '2.0 pez 4', '3.0 sez 5'] t124 = table.vstack([t1, t2, t4], join_type='outer') assert t124.masked is False assert t124.pformat() == [' a b c ', '--- --- ---', '0.0 foo --', '1.0 bar --', '2.0 pez 4', '3.0 sez 5', '0.0 foo --', '1.0 bar --'] def test_stack_incompatible(self, operation_table_type): self._setup(operation_table_type) with pytest.raises(TableMergeError) as excinfo: table.vstack([self.t1, self.t3], join_type='inner') assert ("The 'b' columns have incompatible types: {}" .format([self.t1['b'].dtype.name, self.t3['b'].dtype.name]) in str(excinfo.value)) with pytest.raises(TableMergeError) as excinfo: table.vstack([self.t1, self.t3], join_type='outer') assert "The 'b' columns have incompatible types:" in str(excinfo.value) with pytest.raises(TableMergeError): table.vstack([self.t1, self.t2], join_type='exact') t1_reshape = self.t1.copy() t1_reshape['b'].shape = [2, 1] with pytest.raises(TableMergeError) as excinfo: table.vstack([self.t1, t1_reshape]) assert "have different shape" in str(excinfo.value) def test_vstack_one_masked(self, operation_table_type): if operation_table_type is QTable: pytest.xfail('Quantity columns do not support masking.') self._setup(operation_table_type) t1 = self.t1 t4 = self.t4 t4['b'].mask[1] = True t14 = table.vstack([t1, t4]) assert t14.masked is False assert t14.pformat() == [' a b ', '--- ---', '0.0 foo', '1.0 bar', '0.0 foo', '1.0 --'] def test_col_meta_merge_inner(self, operation_table_type): self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 t4 = self.t4 # Key col 'a', should last value ('km') t1['a'].info.unit = 'cm' t2['a'].info.unit = 'm' t4['a'].info.unit = 'km' # Key col 'a' format should take last when all match t1['a'].info.format = '%f' t2['a'].info.format = '%f' t4['a'].info.format = '%f' # Key col 'b', take first value 't1_b' t1['b'].info.description = 't1_b' # Key col 'b', take first non-empty value '%6s' t4['b'].info.format = '%6s' # Key col 'a', should be merged meta t1['a'].info.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)])) t2['a'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])) t4['a'].info.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)])) # Key col 'b', should be meta2 t2['b'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])) if operation_table_type is Table: ctx = pytest.warns(metadata.MergeConflictWarning) else: ctx = 
nullcontext() with ctx as warning_lines: out = table.vstack([t1, t2, t4], join_type='inner') if operation_table_type is Table: assert len(warning_lines) == 2 assert ("In merged column 'a' the 'unit' attribute does not match (cm != m)" in str(warning_lines[0].message)) assert ("In merged column 'a' the 'unit' attribute does not match (m != km)" in str(warning_lines[1].message)) # Check units are suitably ignored for a regular Table assert out.pformat() == [' a b ', ' km ', '-------- ------', '0.000000 foo', '1.000000 bar', '2.000000 pez', '3.000000 sez', '0.000000 foo', '1.000000 bar'] else: # Check QTable correctly dealt with units. assert out.pformat() == [' a b ', ' km ', '-------- ------', '0.000000 foo', '0.000010 bar', '0.002000 pez', '0.003000 sez', '0.000000 foo', '1.000000 bar'] assert out['a'].info.unit == 'km' assert out['a'].info.format == '%f' assert out['b'].info.description == 't1_b' assert out['b'].info.format == '%6s' assert out['a'].info.meta == self.meta_merge assert out['b'].info.meta == OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]) def test_col_meta_merge_outer(self, operation_table_type): if operation_table_type is QTable: pytest.xfail('Quantity columns do not support masking.') self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 t4 = self.t4 # Key col 'a', should last value ('km') t1['a'].unit = 'cm' t2['a'].unit = 'm' t4['a'].unit = 'km' # Key col 'a' format should take last when all match t1['a'].info.format = '%0d' t2['a'].info.format = '%0d' t4['a'].info.format = '%0d' # Key col 'b', take first value 't1_b' t1['b'].info.description = 't1_b' # Key col 'b', take first non-empty value '%6s' t4['b'].info.format = '%6s' # Key col 'a', should be merged meta t1['a'].info.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)])) t2['a'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])) t4['a'].info.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)])) # Key col 'b', should be meta2 t2['b'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])) # All these should pass through t2['c'].unit = 'm' t2['c'].info.format = '%6s' t2['c'].info.description = 't2_c' with pytest.warns(metadata.MergeConflictWarning) as warning_lines: out = table.vstack([t1, t2, t4], join_type='outer') assert len(warning_lines) == 2 assert ("In merged column 'a' the 'unit' attribute does not match (cm != m)" in str(warning_lines[0].message)) assert ("In merged column 'a' the 'unit' attribute does not match (m != km)" in str(warning_lines[1].message)) assert out['a'].unit == 'km' assert out['a'].info.format == '%0d' assert out['b'].info.description == 't1_b' assert out['b'].info.format == '%6s' assert out['a'].info.meta == self.meta_merge assert out['b'].info.meta == OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]) assert out['c'].info.unit == 'm' assert out['c'].info.format == '%6s' assert out['c'].info.description == 't2_c' def test_vstack_one_table(self, operation_table_type): self._setup(operation_table_type) """Regression test for issue #3313""" assert (self.t1 == table.vstack(self.t1)).all() assert (self.t1 == table.vstack([self.t1])).all() def test_mixin_functionality(self, mixin_cols): col = mixin_cols['m'] len_col = len(col) t = table.QTable([col], names=['a']) cls_name = type(col).__name__ # Vstack works for these classes: if isinstance(col, (u.Quantity, Time, TimeDelta, SkyCoord, EarthLocation, BaseRepresentationOrDifferential)): out = table.vstack([t, t]) assert len(out) == len_col * 2 if cls_name == 
'SkyCoord': # Argh, SkyCoord needs __eq__!! assert skycoord_equal(out['a'][len_col:], col) assert skycoord_equal(out['a'][:len_col], col) elif 'Repr' in cls_name or 'Diff' in cls_name: assert np.all(representation_equal(out['a'][:len_col], col)) assert np.all(representation_equal(out['a'][len_col:], col)) else: assert np.all(out['a'][:len_col] == col) assert np.all(out['a'][len_col:] == col) else: with pytest.raises(NotImplementedError) as err: table.vstack([t, t]) assert ('vstack unavailable for mixin column type(s): {}' .format(cls_name) in str(err.value)) # Check for outer stack which requires masking. Only Time supports # this currently. t2 = table.QTable([col], names=['b']) # different from col name for t if isinstance(col, (Time, TimeDelta, Quantity)): out = table.vstack([t, t2], join_type='outer') assert len(out) == len_col * 2 assert np.all(out['a'][:len_col] == col) assert np.all(out['b'][len_col:] == col) assert check_mask(out['a'], [False] * len_col + [True] * len_col) assert check_mask(out['b'], [True] * len_col + [False] * len_col) # check directly stacking mixin columns: out2 = table.vstack([t, t2['b']]) assert np.all(out['a'] == out2['a']) assert np.all(out['b'] == out2['b']) else: with pytest.raises(NotImplementedError) as err: table.vstack([t, t2], join_type='outer') assert ('vstack requires masking' in str(err.value) or 'vstack unavailable' in str(err.value)) def test_vstack_different_representation(self): """Test that representations can be mixed together.""" rep1 = CartesianRepresentation([1, 2]*u.km, [3, 4]*u.km, 1*u.km) rep2 = SphericalRepresentation([0]*u.deg, [0]*u.deg, 10*u.km) t1 = Table([rep1]) t2 = Table([rep2]) t12 = table.vstack([t1, t2]) expected = CartesianRepresentation([1, 2, 10]*u.km, [3, 4, 0]*u.km, [1, 1, 0]*u.km) assert np.all(representation_equal(t12['col0'], expected)) rep3 = UnitSphericalRepresentation([0]*u.deg, [0]*u.deg) t3 = Table([rep3]) with pytest.raises(ValueError, match='representations are inconsistent'): table.vstack([t1, t3]) class TestDStack(): def _setup(self, t_cls=Table): self.t1 = t_cls.read([' a b', ' 0. foo', ' 1. bar'], format='ascii') self.t2 = t_cls.read([' a b c', ' 2. pez 4', ' 3. sez 5'], format='ascii') self.t2['d'] = Time([1, 2], format='cxcsec') self.t3 = t_cls({'a': [[5., 6.], [4., 3.]], 'b': [['foo', 'bar'], ['pez', 'sez']]}, names=('a', 'b')) self.t4 = t_cls(self.t1, copy=True, masked=t_cls is Table) self.t5 = t_cls({'a': [[4., 2.], [1., 6.]], 'b': [['foo', 'pez'], ['bar', 'sez']]}, names=('a', 'b')) self.t6 = t_cls.read([' a b c', ' 7. pez 2', ' 4. sez 6', ' 6. foo 3'], format='ascii') def test_validate_join_type(self): self._setup() with pytest.raises(TypeError, match='Did you accidentally call dstack'): table.dstack(self.t1, self.t2) @staticmethod def compare_dstack(tables, out): for ii, tbl in enumerate(tables): for name, out_col in out.columns.items(): if name in tbl.colnames: # Columns always compare equal assert np.all(tbl[name] == out[name][:, ii]) # If input has a mask then output must have same mask if hasattr(tbl[name], 'mask'): assert np.all(tbl[name].mask == out[name].mask[:, ii]) # If input has no mask then output might have a mask (if other table # is missing that column). If so then all mask values should be False. elif hasattr(out[name], 'mask'): assert not np.any(out[name].mask[:, ii]) else: # Column missing for this table, out must have a mask with all True. 
assert np.all(out[name].mask[:, ii]) def test_dstack_table_column(self, operation_table_type): """Stack a table with 3 cols and one column (gets auto-converted to Table). """ self._setup(operation_table_type) t2 = self.t1.copy() out = table.dstack([self.t1, t2['a']]) self.compare_dstack([self.t1, t2[('a',)]], out) def test_dstack_basic_outer(self, operation_table_type): if operation_table_type is QTable: pytest.xfail('Quantity columns do not support masking.') self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 t4 = self.t4 t4['a'].mask[0] = True # Test for non-masked table t12 = table.dstack([t1, t2], join_type='outer') assert type(t12) is operation_table_type assert type(t12['a']) is type(t1['a']) # noqa assert type(t12['b']) is type(t1['b']) # noqa self.compare_dstack([t1, t2], t12) # Test for masked table t124 = table.dstack([t1, t2, t4], join_type='outer') assert type(t124) is operation_table_type assert type(t124['a']) is type(t4['a']) # noqa assert type(t124['b']) is type(t4['b']) # noqa self.compare_dstack([t1, t2, t4], t124) def test_dstack_basic_inner(self, operation_table_type): self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 t4 = self.t4 # Test for masked table t124 = table.dstack([t1, t2, t4], join_type='inner') assert type(t124) is operation_table_type assert type(t124['a']) is type(t4['a']) # noqa assert type(t124['b']) is type(t4['b']) # noqa self.compare_dstack([t1, t2, t4], t124) def test_dstack_multi_dimension_column(self, operation_table_type): self._setup(operation_table_type) t3 = self.t3 t5 = self.t5 t2 = self.t2 t35 = table.dstack([t3, t5]) assert type(t35) is operation_table_type assert type(t35['a']) is type(t3['a']) # noqa assert type(t35['b']) is type(t3['b']) # noqa self.compare_dstack([t3, t5], t35) with pytest.raises(TableMergeError): table.dstack([t2, t3]) def test_dstack_different_length_table(self, operation_table_type): self._setup(operation_table_type) t2 = self.t2 t6 = self.t6 with pytest.raises(ValueError): table.dstack([t2, t6]) def test_dstack_single_table(self): self._setup(Table) out = table.dstack(self.t1) assert np.all(out == self.t1) def test_dstack_representation(self): rep1 = SphericalRepresentation([1, 2]*u.deg, [3, 4]*u.deg, 1*u.kpc) rep2 = SphericalRepresentation([10, 20]*u.deg, [30, 40]*u.deg, 10*u.kpc) t1 = Table([rep1]) t2 = Table([rep2]) t12 = table.dstack([t1, t2]) assert np.all(representation_equal(t12['col0'][:, 0], rep1)) assert np.all(representation_equal(t12['col0'][:, 1], rep2)) def test_dstack_skycoord(self): sc1 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg) sc2 = SkyCoord([10, 20]*u.deg, [30, 40]*u.deg) t1 = Table([sc1]) t2 = Table([sc2]) t12 = table.dstack([t1, t2]) assert skycoord_equal(sc1, t12['col0'][:, 0]) assert skycoord_equal(sc2, t12['col0'][:, 1]) class TestHStack(): def _setup(self, t_cls=Table): self.t1 = t_cls.read([' a b', ' 0. foo', ' 1. bar'], format='ascii') self.t2 = t_cls.read([' a b c', ' 2. pez 4', ' 3. sez 5'], format='ascii') self.t3 = t_cls.read([' d e', ' 4. 7', ' 5. 8', ' 6. 
9'], format='ascii') self.t4 = t_cls(self.t1, copy=True, masked=True) self.t4['a'].name = 'f' self.t4['b'].name = 'g' # The following table has meta-data that conflicts with t1 self.t5 = t_cls(self.t1, copy=True) self.t1.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)])) self.t2.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])) self.t4.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)])) self.t5.meta.update(OrderedDict([('b', 3), ('c', 'k'), ('d', 1)])) self.meta_merge = OrderedDict([('b', [1, 2, 3, 4, 5, 6]), ('c', {'a': 1, 'b': 1, 'c': 1}), ('d', 1), ('a', 1), ('e', 1)]) def test_validate_join_type(self): self._setup() with pytest.raises(TypeError, match='Did you accidentally call hstack'): table.hstack(self.t1, self.t2) def test_stack_same_table(self, operation_table_type): """ From #2995, test that hstack'ing references to the same table has the expected output. """ self._setup(operation_table_type) out = table.hstack([self.t1, self.t1]) assert out.masked is False assert out.pformat() == ['a_1 b_1 a_2 b_2', '--- --- --- ---', '0.0 foo 0.0 foo', '1.0 bar 1.0 bar'] def test_stack_rows(self, operation_table_type): self._setup(operation_table_type) out = table.hstack([self.t1[0], self.t2[1]]) assert out.masked is False assert out.pformat() == ['a_1 b_1 a_2 b_2 c ', '--- --- --- --- ---', '0.0 foo 3.0 sez 5'] def test_stack_columns(self, operation_table_type): self._setup(operation_table_type) out = table.hstack([self.t1, self.t2['c']]) assert type(out['a']) is type(self.t1['a']) # noqa assert type(out['b']) is type(self.t1['b']) # noqa assert type(out['c']) is type(self.t2['c']) # noqa assert out.pformat() == [' a b c ', '--- --- ---', '0.0 foo 4', '1.0 bar 5'] def test_table_meta_merge(self, operation_table_type): self._setup(operation_table_type) out = table.hstack([self.t1, self.t2, self.t4], join_type='inner') assert out.meta == self.meta_merge def test_table_meta_merge_conflict(self, operation_table_type): self._setup(operation_table_type) with pytest.warns(metadata.MergeConflictWarning) as w: out = table.hstack([self.t1, self.t5], join_type='inner') assert len(w) == 2 assert out.meta == self.t5.meta with pytest.warns(metadata.MergeConflictWarning) as w: out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='warn') assert len(w) == 2 assert out.meta == self.t5.meta out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='silent') assert out.meta == self.t5.meta with pytest.raises(MergeConflictError): out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='error') with pytest.raises(ValueError): out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='nonsense') def test_bad_input_type(self, operation_table_type): self._setup(operation_table_type) with pytest.raises(ValueError): table.hstack([]) with pytest.raises(TypeError): table.hstack(1) with pytest.raises(TypeError): table.hstack([self.t2, 1]) with pytest.raises(ValueError): table.hstack([self.t1, self.t2], join_type='invalid join type') def test_stack_basic(self, operation_table_type): self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 t3 = self.t3 t4 = self.t4 out = table.hstack([t1, t2], join_type='inner') assert out.masked is False assert type(out) is operation_table_type assert type(out['a_1']) is type(t1['a']) # noqa assert type(out['b_1']) is type(t1['b']) # noqa assert type(out['a_2']) is type(t2['a']) # noqa assert type(out['b_2']) is type(t2['b']) # noqa assert out.pformat() 
== ['a_1 b_1 a_2 b_2 c ', '--- --- --- --- ---', '0.0 foo 2.0 pez 4', '1.0 bar 3.0 sez 5'] # stacking as a list gives same result out_list = table.hstack([t1, t2], join_type='inner') assert out.pformat() == out_list.pformat() out = table.hstack([t1, t2], join_type='outer') assert out.pformat() == out_list.pformat() out = table.hstack([t1, t2, t3, t4], join_type='outer') assert out.masked is False assert out.pformat() == ['a_1 b_1 a_2 b_2 c d e f g ', '--- --- --- --- --- --- --- --- ---', '0.0 foo 2.0 pez 4 4.0 7 0.0 foo', '1.0 bar 3.0 sez 5 5.0 8 1.0 bar', ' -- -- -- -- -- 6.0 9 -- --'] out = table.hstack([t1, t2, t3, t4], join_type='inner') assert out.masked is False assert out.pformat() == ['a_1 b_1 a_2 b_2 c d e f g ', '--- --- --- --- --- --- --- --- ---', '0.0 foo 2.0 pez 4 4.0 7 0.0 foo', '1.0 bar 3.0 sez 5 5.0 8 1.0 bar'] def test_stack_incompatible(self, operation_table_type): self._setup(operation_table_type) # For join_type exact, which will fail here because n_rows # does not match with pytest.raises(TableMergeError): table.hstack([self.t1, self.t3], join_type='exact') def test_hstack_one_masked(self, operation_table_type): if operation_table_type is QTable: pytest.xfail() self._setup(operation_table_type) t1 = self.t1 t2 = operation_table_type(t1, copy=True, masked=True) t2.meta.clear() t2['b'].mask[1] = True out = table.hstack([t1, t2]) assert out.pformat() == ['a_1 b_1 a_2 b_2', '--- --- --- ---', '0.0 foo 0.0 foo', '1.0 bar 1.0 --'] def test_table_col_rename(self, operation_table_type): self._setup(operation_table_type) out = table.hstack([self.t1, self.t2], join_type='inner', uniq_col_name='{table_name}_{col_name}', table_names=('left', 'right')) assert out.masked is False assert out.pformat() == ['left_a left_b right_a right_b c ', '------ ------ ------- ------- ---', ' 0.0 foo 2.0 pez 4', ' 1.0 bar 3.0 sez 5'] def test_col_meta_merge(self, operation_table_type): self._setup(operation_table_type) t1 = self.t1 t3 = self.t3[:2] t4 = self.t4 # Just set a bunch of meta and make sure it is the same in output meta1 = OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]) t1['a'].unit = 'cm' t1['b'].info.description = 't1_b' t4['f'].info.format = '%6s' t1['b'].info.meta.update(meta1) t3['d'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])) t4['g'].info.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)])) t3['e'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])) t3['d'].unit = 'm' t3['d'].info.format = '%6s' t3['d'].info.description = 't3_c' out = table.hstack([t1, t3, t4], join_type='exact') for t in [t1, t3, t4]: for name in t.colnames: for attr in ('meta', 'unit', 'format', 'description'): assert getattr(out[name].info, attr) == getattr(t[name].info, attr) # Make sure we got a copy of meta, not ref t1['b'].info.meta['b'] = None assert out['b'].info.meta['b'] == [1, 2] def test_hstack_one_table(self, operation_table_type): self._setup(operation_table_type) """Regression test for issue #3313""" assert (self.t1 == table.hstack(self.t1)).all() assert (self.t1 == table.hstack([self.t1])).all() def test_mixin_functionality(self, mixin_cols): col1 = mixin_cols['m'] col2 = col1[2:4] # Shorter version of col1 t1 = table.QTable([col1]) t2 = table.QTable([col2]) cls_name = type(col1).__name__ out = table.hstack([t1, t2], join_type='inner') assert type(out['col0_1']) is type(out['col0_2']) # noqa assert len(out) == len(col2) # Check that columns are as expected. 
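# (Editorial note: SkyCoord does not define an element-wise __eq__, and
# representation/differential classes are compared component-wise, so the
# skycoord_equal and representation_equal helpers are used here instead of
# a plain == comparison.)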
if cls_name == 'SkyCoord': assert skycoord_equal(out['col0_1'], col1[:len(col2)]) assert skycoord_equal(out['col0_2'], col2) elif 'Repr' in cls_name or 'Diff' in cls_name: assert np.all(representation_equal(out['col0_1'], col1[:len(col2)])) assert np.all(representation_equal(out['col0_2'], col2)) else: assert np.all(out['col0_1'] == col1[:len(col2)]) assert np.all(out['col0_2'] == col2) # Time class supports masking, all other mixins do not if isinstance(col1, (Time, TimeDelta, Quantity)): out = table.hstack([t1, t2], join_type='outer') assert len(out) == len(t1) assert np.all(out['col0_1'] == col1) assert np.all(out['col0_2'][:len(col2)] == col2) assert check_mask(out['col0_2'], [False, False, True, True]) # check directly stacking mixin columns: out2 = table.hstack([t1, t2['col0']], join_type='outer') assert np.all(out['col0_1'] == out2['col0_1']) assert np.all(out['col0_2'] == out2['col0_2']) else: with pytest.raises(NotImplementedError) as err: table.hstack([t1, t2], join_type='outer') assert 'hstack requires masking' in str(err.value) def test_unique(operation_table_type): t = operation_table_type.read( [' a b c d', ' 2 b 7.0 0', ' 1 c 3.0 5', ' 2 b 6.0 2', ' 2 a 4.0 3', ' 1 a 1.0 7', ' 2 b 5.0 1', ' 0 a 0.0 4', ' 1 a 2.0 6', ' 1 c 3.0 5', ], format='ascii') tu = operation_table_type(np.sort(t[:-1])) t_all = table.unique(t) assert sort_eq(t_all.pformat(), tu.pformat()) t_s = t.copy() del t_s['b', 'c', 'd'] t_all = table.unique(t_s) assert sort_eq(t_all.pformat(), [' a ', '---', ' 0', ' 1', ' 2']) key1 = 'a' t1a = table.unique(t, key1) assert sort_eq(t1a.pformat(), [' a b c d ', '--- --- --- ---', ' 0 a 0.0 4', ' 1 c 3.0 5', ' 2 b 7.0 0']) t1b = table.unique(t, key1, keep='last') assert sort_eq(t1b.pformat(), [' a b c d ', '--- --- --- ---', ' 0 a 0.0 4', ' 1 c 3.0 5', ' 2 b 5.0 1']) t1c = table.unique(t, key1, keep='none') assert sort_eq(t1c.pformat(), [' a b c d ', '--- --- --- ---', ' 0 a 0.0 4']) key2 = ['a', 'b'] t2a = table.unique(t, key2) assert sort_eq(t2a.pformat(), [' a b c d ', '--- --- --- ---', ' 0 a 0.0 4', ' 1 a 1.0 7', ' 1 c 3.0 5', ' 2 a 4.0 3', ' 2 b 7.0 0']) t2b = table.unique(t, key2, keep='last') assert sort_eq(t2b.pformat(), [' a b c d ', '--- --- --- ---', ' 0 a 0.0 4', ' 1 a 2.0 6', ' 1 c 3.0 5', ' 2 a 4.0 3', ' 2 b 5.0 1']) t2c = table.unique(t, key2, keep='none') assert sort_eq(t2c.pformat(), [' a b c d ', '--- --- --- ---', ' 0 a 0.0 4', ' 2 a 4.0 3']) key2 = ['a', 'a'] with pytest.raises(ValueError) as exc: t2a = table.unique(t, key2) assert exc.value.args[0] == "duplicate key names" with pytest.raises(ValueError) as exc: table.unique(t, key2, keep=True) assert exc.value.args[0] == ( "'keep' should be one of 'first', 'last', 'none'") t1_m = operation_table_type(t1a, masked=True) t1_m['a'].mask[1] = True with pytest.raises(ValueError) as exc: t1_mu = table.unique(t1_m) assert exc.value.args[0] == ( "cannot use columns with masked values as keys; " "remove column 'a' from keys and rerun unique()") t1_mu = table.unique(t1_m, silent=True) assert t1_mu.masked is False assert t1_mu.pformat() == [' a b c d ', '--- --- --- ---', ' 0 a 0.0 4', ' 2 b 7.0 0', ' -- c 3.0 5'] with pytest.raises(ValueError): t1_mu = table.unique(t1_m, silent=True, keys='a') t1_m = operation_table_type(t, masked=True) t1_m['a'].mask[1] = True t1_m['d'].mask[3] = True # Test that multiple masked key columns get removed in the correct # order t1_mu = table.unique(t1_m, keys=['d', 'a', 'b'], silent=True) assert t1_mu.masked is False assert t1_mu.pformat() == [' a b c d ', '--- --- --- ---', ' 
2 a 4.0 --', ' 2 b 7.0 0', ' -- c 3.0 5'] def test_vstack_bytes(operation_table_type): """ Test for issue #5617 when vstack'ing bytes columns in Py3. This is really an upstream numpy issue numpy/numpy/#8403. """ t = operation_table_type([[b'a']], names=['a']) assert t['a'].itemsize == 1 t2 = table.vstack([t, t]) assert len(t2) == 2 assert t2['a'].itemsize == 1 def test_vstack_unicode(): """ Test for problem related to issue #5617 when vstack'ing *unicode* columns. In this case the character size gets multiplied by 4. """ t = table.Table([['a']], names=['a']) assert t['a'].itemsize == 4 # 4-byte / char for U dtype t2 = table.vstack([t, t]) assert len(t2) == 2 assert t2['a'].itemsize == 4 def test_join_mixins_time_quantity(): """ Test for table join using non-ndarray key columns. """ tm1 = Time([2, 1, 2], format='cxcsec') q1 = [2, 1, 1] * u.m idx1 = [1, 2, 3] tm2 = Time([2, 3], format='cxcsec') q2 = [2, 3] * u.m idx2 = [10, 20] t1 = Table([tm1, q1, idx1], names=['tm', 'q', 'idx']) t2 = Table([tm2, q2, idx2], names=['tm', 'q', 'idx']) # Output: # # <Table length=4> # tm q idx_1 idx_2 # m # object float64 int64 int64 # ------------------ ------- ----- ----- # 0.9999999999969589 1.0 2 -- # 2.00000000000351 1.0 3 -- # 2.00000000000351 2.0 1 10 # 3.000000000000469 3.0 -- 20 t12 = table.join(t1, t2, join_type='outer', keys=['tm', 'q']) # Key cols are lexically sorted assert np.all(t12['tm'] == Time([1, 2, 2, 3], format='cxcsec')) assert np.all(t12['q'] == [1, 1, 2, 3] * u.m) assert np.all(t12['idx_1'] == np.ma.array([2, 3, 1, 0], mask=[0, 0, 0, 1])) assert np.all(t12['idx_2'] == np.ma.array([0, 0, 10, 20], mask=[1, 1, 0, 0])) def test_join_mixins_not_sortable(): """ Test for table join using non-ndarray key columns that are not sortable. """ sc = SkyCoord([1, 2], [3, 4], unit='deg,deg') t1 = Table([sc, [1, 2]], names=['sc', 'idx1']) t2 = Table([sc, [10, 20]], names=['sc', 'idx2']) with pytest.raises(TypeError, match='one or more key columns are not sortable'): table.join(t1, t2, keys='sc') def test_join_non_1d_key_column(): c1 = [[1, 2], [3, 4]] c2 = [1, 2] t1 = Table([c1, c2], names=['a', 'b']) t2 = t1.copy() with pytest.raises(ValueError, match="key column 'a' must be 1-d"): table.join(t1, t2, keys='a') def test_argsort_time_column(): """Regression test for #10823.""" times = Time(['2016-01-01', '2018-01-01', '2017-01-01']) t = Table([times], names=['time']) i = t.argsort('time') assert np.all(i == times.argsort()) def test_sort_indexed_table(): """Test fix for #9473 and #6545 - and another regression test for #10823.""" t = Table([[1, 3, 2], [6, 4, 5]], names=('a', 'b')) t.add_index('a') t.sort('a') assert np.all(t['a'] == [1, 2, 3]) assert np.all(t['b'] == [6, 5, 4]) t.sort('b') assert np.all(t['b'] == [4, 5, 6]) assert np.all(t['a'] == [3, 2, 1]) times = ['2016-01-01', '2018-01-01', '2017-01-01'] tm = Time(times) t2 = Table([tm, [3, 2, 1]], names=['time', 'flux']) t2.sort('flux') assert np.all(t2['flux'] == [1, 2, 3]) t2.sort('time') assert np.all(t2['flux'] == [3, 1, 2]) assert np.all(t2['time'] == tm[[0, 2, 1]]) # Using the table as a TimeSeries implicitly sets the index, so # this test is a bit different from the above. 
from astropy.timeseries import TimeSeries ts = TimeSeries(time=times) ts['flux'] = [3, 2, 1] ts.sort('flux') assert np.all(ts['flux'] == [1, 2, 3]) ts.sort('time') assert np.all(ts['flux'] == [3, 1, 2]) assert np.all(ts['time'] == tm[[0, 2, 1]]) def test_get_out_class(): c = table.Column([1, 2]) mc = table.MaskedColumn([1, 2]) q = [1, 2] * u.m assert _get_out_class([c, mc]) is mc.__class__ assert _get_out_class([mc, c]) is mc.__class__ assert _get_out_class([c, c]) is c.__class__ assert _get_out_class([c]) is c.__class__ with pytest.raises(ValueError): _get_out_class([c, q]) with pytest.raises(ValueError): _get_out_class([q, c]) def test_masking_required_exception(): """ Test that outer join, hstack and vstack fail for a mixin column which does not support masking. """ col = table.NdarrayMixin([0, 1, 2, 3]) t1 = table.QTable([[1, 2, 3, 4], col], names=['a', 'b']) t2 = table.QTable([[1, 2], col[:2]], names=['a', 'c']) with pytest.raises(NotImplementedError) as err: table.vstack([t1, t2], join_type='outer') assert 'vstack unavailable' in str(err.value) with pytest.raises(NotImplementedError) as err: table.hstack([t1, t2], join_type='outer') assert 'hstack requires masking' in str(err.value) with pytest.raises(NotImplementedError) as err: table.join(t1, t2, join_type='outer') assert 'join requires masking' in str(err.value) def test_stack_columns(): c = table.Column([1, 2]) mc = table.MaskedColumn([1, 2]) q = [1, 2] * u.m time = Time(['2001-01-02T12:34:56', '2001-02-03T00:01:02']) sc = SkyCoord([1, 2], [3, 4], unit='deg') cq = table.Column([11, 22], unit=u.m) t = table.hstack([c, q]) assert t.__class__ is table.QTable assert t.masked is False t = table.hstack([q, c]) assert t.__class__ is table.QTable assert t.masked is False t = table.hstack([mc, q]) assert t.__class__ is table.QTable assert t.masked is False t = table.hstack([c, mc]) assert t.__class__ is table.Table assert t.masked is False t = table.vstack([q, q]) assert t.__class__ is table.QTable t = table.vstack([c, c]) assert t.__class__ is table.Table t = table.hstack([c, time]) assert t.__class__ is table.Table t = table.hstack([c, sc]) assert t.__class__ is table.Table t = table.hstack([q, time, sc]) assert t.__class__ is table.QTable with pytest.raises(ValueError): table.vstack([c, q]) with pytest.raises(ValueError): t = table.vstack([q, cq]) def test_mixin_join_regression(): # This used to trigger a ValueError: # ValueError: NumPy boolean array indexing assignment cannot assign # 6 input values to the 4 output values where the mask is true t1 = QTable() t1['index'] = [1, 2, 3, 4, 5] t1['flux1'] = [2, 3, 2, 1, 1] * u.Jy t1['flux2'] = [2, 3, 2, 1, 1] * u.Jy t2 = QTable() t2['index'] = [3, 4, 5, 6] t2['flux1'] = [2, 1, 1, 3] * u.Jy t2['flux2'] = [2, 1, 1, 3] * u.Jy t12 = table.join(t1, t2, keys=('index', 'flux1', 'flux2'), join_type='outer') assert len(t12) == 6
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst import copy import pickle from io import StringIO import pytest import numpy as np from astropy.table.serialize import represent_mixins_as_columns from astropy.utils.data_info import ParentDtypeInfo from astropy.table.table_helpers import ArrayWrapper from astropy.coordinates import EarthLocation, SkyCoord from astropy.table import Table, QTable, join, hstack, vstack, Column, NdarrayMixin from astropy.table import serialize from astropy import time from astropy import coordinates from astropy import units as u from astropy.table.column import BaseColumn from astropy.table import table_helpers from astropy.utils.exceptions import AstropyUserWarning from astropy.utils.metadata import MergeConflictWarning from astropy.coordinates.tests.test_representation import representation_equal from astropy.coordinates.tests.helper import skycoord_equal from .conftest import MIXIN_COLS def test_attributes(mixin_cols): """ Required attributes for a column can be set. """ m = mixin_cols['m'] m.info.name = 'a' assert m.info.name == 'a' m.info.description = 'a' assert m.info.description == 'a' # Cannot set unit for these classes if isinstance(m, (u.Quantity, coordinates.SkyCoord, time.Time, time.TimeDelta, coordinates.BaseRepresentationOrDifferential)): with pytest.raises(AttributeError): m.info.unit = u.m else: m.info.unit = u.m assert m.info.unit is u.m m.info.format = 'a' assert m.info.format == 'a' m.info.meta = {'a': 1} assert m.info.meta == {'a': 1} with pytest.raises(AttributeError): m.info.bad_attr = 1 with pytest.raises(AttributeError): m.info.bad_attr def check_mixin_type(table, table_col, in_col): # We check for QuantityInfo rather than just isinstance(col, u.Quantity) # since we want to treat EarthLocation as a mixin, even though it is # a Quantity subclass. if ((isinstance(in_col.info, u.QuantityInfo) and type(table) is not QTable) or isinstance(in_col, Column)): assert type(table_col) is table.ColumnClass else: assert type(table_col) is type(in_col) # Make sure in_col got copied and creating table did not touch it assert in_col.info.name is None def test_make_table(table_types, mixin_cols): """ Make a table with the columns in mixin_cols, which is an ordered dict of three cols: 'a' and 'b' are table_types.Column type, and 'm' is a mixin. """ t = table_types.Table(mixin_cols) check_mixin_type(t, t['m'], mixin_cols['m']) cols = list(mixin_cols.values()) t = table_types.Table(cols, names=('i', 'a', 'b', 'm')) check_mixin_type(t, t['m'], mixin_cols['m']) t = table_types.Table(cols) check_mixin_type(t, t['col3'], mixin_cols['m']) def test_io_ascii_write(): """ Test that table with mixin column can be written by io.ascii for every pure Python writer. No validation of the output is done, this just confirms no exceptions. """ from astropy.io.ascii.connect import _get_connectors_table t = QTable(MIXIN_COLS) for fmt in _get_connectors_table(): if fmt['Write'] and '.fast_' not in fmt['Format']: out = StringIO() t.write(out, format=fmt['Format']) def test_votable_quantity_write(tmpdir): """ Test that table with Quantity mixin column can be round-tripped by io.votable. Note that FITS and HDF5 mixin support are tested (much more thoroughly) in their respective subpackage tests (io/fits/tests/test_connect.py and io/misc/tests/test_hdf5.py). 
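The round trip below writes a Quantity column to a VOTable file and
asserts that both the column class and its unit survive the read back.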
""" t = QTable() t['a'] = u.Quantity([1, 2, 4], unit='nm') filename = str(tmpdir.join('table-tmp')) t.write(filename, format='votable', overwrite=True) qt = QTable.read(filename, format='votable') assert isinstance(qt['a'], u.Quantity) assert qt['a'].unit == 'nm' @pytest.mark.remote_data @pytest.mark.parametrize('table_types', (Table, QTable)) def test_io_time_write_fits_standard(tmpdir, table_types): """ Test that table with Time mixin columns can be written by io.fits. Validation of the output is done. Test that io.fits writes a table containing Time mixin columns that can be partially round-tripped (metadata scale, location). Note that we postpone checking the "local" scale, since that cannot be done with format 'cxcsec', as it requires an epoch. """ t = table_types([[1, 2], ['string', 'column']]) for scale in time.STANDARD_TIME_SCALES: t['a' + scale] = time.Time([[1, 2], [3, 4]], format='cxcsec', scale=scale, location=EarthLocation( -2446354, 4237210, 4077985, unit='m')) t['b' + scale] = time.Time(['1999-01-01T00:00:00.123456789', '2010-01-01T00:00:00'], scale=scale) t['c'] = [3., 4.] filename = str(tmpdir.join('table-tmp')) # Show that FITS format succeeds with pytest.warns( AstropyUserWarning, match='Time Column "btai" has no specified location, ' 'but global Time Position is present'): t.write(filename, format='fits', overwrite=True) with pytest.warns( AstropyUserWarning, match='Time column reference position "TRPOSn" is not specified'): tm = table_types.read(filename, format='fits', astropy_native=True) for scale in time.STANDARD_TIME_SCALES: for ab in ('a', 'b'): name = ab + scale # Assert that the time columns are read as Time assert isinstance(tm[name], time.Time) # Assert that the scales round-trip assert tm[name].scale == t[name].scale # Assert that the format is jd assert tm[name].format == 'jd' # Assert that the location round-trips assert tm[name].location == t[name].location # Finally assert that the column data round-trips assert (tm[name] == t[name]).all() for name in ('col0', 'col1', 'c'): # Assert that the non-time columns are read as Column assert isinstance(tm[name], Column) # Assert that the non-time columns' data round-trips assert (tm[name] == t[name]).all() # Test for conversion of time data to its value, as defined by its format for scale in time.STANDARD_TIME_SCALES: for ab in ('a', 'b'): name = ab + scale t[name].info.serialize_method['fits'] = 'formatted_value' t.write(filename, format='fits', overwrite=True) tm = table_types.read(filename, format='fits') for scale in time.STANDARD_TIME_SCALES: for ab in ('a', 'b'): name = ab + scale assert not isinstance(tm[name], time.Time) assert (tm[name] == t[name].value).all() @pytest.mark.parametrize('table_types', (Table, QTable)) def test_io_time_write_fits_local(tmpdir, table_types): """ Test that table with a Time mixin with scale local can also be written by io.fits. Like ``test_io_time_write_fits_standard`` above, but avoiding ``cxcsec`` format, which requires an epoch and thus cannot be used for a local time scale. """ t = table_types([[1, 2], ['string', 'column']]) t['a_local'] = time.Time([[50001, 50002], [50003, 50004]], format='mjd', scale='local', location=EarthLocation(-2446354, 4237210, 4077985, unit='m')) t['b_local'] = time.Time(['1999-01-01T00:00:00.123456789', '2010-01-01T00:00:00'], scale='local') t['c'] = [3., 4.] 
filename = str(tmpdir.join('table-tmp')) # Show that FITS format succeeds with pytest.warns(AstropyUserWarning, match='Time Column "b_local" has no specified location'): t.write(filename, format='fits', overwrite=True) with pytest.warns(AstropyUserWarning, match='Time column reference position "TRPOSn" is not specified.'): tm = table_types.read(filename, format='fits', astropy_native=True) for ab in ('a', 'b'): name = ab + '_local' # Assert that the time columns are read as Time assert isinstance(tm[name], time.Time) # Assert that the scales round-trip assert tm[name].scale == t[name].scale # Assert that the format is jd assert tm[name].format == 'jd' # Assert that the location round-trips assert tm[name].location == t[name].location # Finally assert that the column data round-trips assert (tm[name] == t[name]).all() for name in ('col0', 'col1', 'c'): # Assert that the non-time columns are read as Column assert isinstance(tm[name], Column) # Assert that the non-time columns' data round-trips assert (tm[name] == t[name]).all() # Test for conversion of time data to its value, as defined by its format. for ab in ('a', 'b'): name = ab + '_local' t[name].info.serialize_method['fits'] = 'formatted_value' t.write(filename, format='fits', overwrite=True) tm = table_types.read(filename, format='fits') for ab in ('a', 'b'): name = ab + '_local' assert not isinstance(tm[name], time.Time) assert (tm[name] == t[name].value).all() def test_votable_mixin_write_fail(mixin_cols): """ Test that table with mixin columns (excluding Quantity) cannot be written by io.votable. """ t = QTable(mixin_cols) # Only do this test if there are unsupported column types (i.e. anything besides # BaseColumn and Quantity class instances). unsupported_cols = t.columns.not_isinstance((BaseColumn, u.Quantity)) if not unsupported_cols: pytest.skip("no unsupported column types") out = StringIO() with pytest.raises(ValueError) as err: t.write(out, format='votable') assert 'cannot write table with mixin column(s)' in str(err.value) def test_join(table_types): """ Join tables with mixin cols. Use column "i" as proxy for what the result should be for each mixin. 
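Rows are matched on the string key column "a"; the "i_1" and "i_2"
columns of the joined table then give, for each output row, the input
row index that every mixin column should have been drawn from.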
""" t1 = table_types.Table() t1['a'] = table_types.Column(['a', 'b', 'b', 'c']) t1['i'] = table_types.Column([0, 1, 2, 3]) for name, col in MIXIN_COLS.items(): t1[name] = col t2 = table_types.Table(t1) t2['a'] = ['b', 'c', 'a', 'd'] for name, col in MIXIN_COLS.items(): t1[name].info.description = name t2[name].info.description = name + '2' for join_type in ('inner', 'left'): t12 = join(t1, t2, keys='a', join_type=join_type) idx1 = t12['i_1'] idx2 = t12['i_2'] for name, col in MIXIN_COLS.items(): name1 = name + '_1' name2 = name + '_2' assert_table_name_col_equal(t12, name1, col[idx1]) assert_table_name_col_equal(t12, name2, col[idx2]) assert t12[name1].info.description == name assert t12[name2].info.description == name + '2' for join_type in ('outer', 'right'): with pytest.raises(NotImplementedError) as exc: t12 = join(t1, t2, keys='a', join_type=join_type) assert 'join requires masking column' in str(exc.value) with pytest.raises(TypeError) as exc: t12 = join(t1, t2, keys=['a', 'skycoord']) assert 'one or more key columns are not sortable' in str(exc.value) # Join does work for a mixin which is a subclass of np.ndarray with pytest.warns(MergeConflictWarning, match="In merged column 'quantity' the 'description' " "attribute does not match"): t12 = join(t1, t2, keys=['quantity']) assert np.all(t12['a_1'] == t1['a']) def test_hstack(table_types): """ Hstack tables with mixin cols. Use column "i" as proxy for what the result should be for each mixin. """ t1 = table_types.Table() t1['i'] = table_types.Column([0, 1, 2, 3]) for name, col in MIXIN_COLS.items(): t1[name] = col t1[name].info.description = name t1[name].info.meta = {'a': 1} for join_type in ('inner', 'outer'): for chop in (True, False): t2 = table_types.Table(t1) if chop: t2 = t2[:-1] if join_type == 'outer': with pytest.raises(NotImplementedError) as exc: t12 = hstack([t1, t2], join_type=join_type) assert 'hstack requires masking column' in str(exc.value) continue t12 = hstack([t1, t2], join_type=join_type) idx1 = t12['i_1'] idx2 = t12['i_2'] for name, col in MIXIN_COLS.items(): name1 = name + '_1' name2 = name + '_2' assert_table_name_col_equal(t12, name1, col[idx1]) assert_table_name_col_equal(t12, name2, col[idx2]) for attr in ('description', 'meta'): assert getattr(t1[name].info, attr) == getattr(t12[name1].info, attr) assert getattr(t2[name].info, attr) == getattr(t12[name2].info, attr) def assert_table_name_col_equal(t, name, col): """ Assert all(t[name] == col), with special handling for known mixin cols. 
""" if isinstance(col, coordinates.SkyCoord): assert np.all(t[name].ra == col.ra) assert np.all(t[name].dec == col.dec) elif isinstance(col, coordinates.BaseRepresentationOrDifferential): assert np.all(representation_equal(t[name], col)) elif isinstance(col, u.Quantity): if type(t) is QTable: assert np.all(t[name] == col) elif isinstance(col, table_helpers.ArrayWrapper): assert np.all(t[name].data == col.data) else: assert np.all(t[name] == col) def test_get_items(mixin_cols): """ Test that slicing / indexing table gives right values and col attrs inherit """ attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta') m = mixin_cols['m'] m.info.name = 'm' m.info.format = '{0}' m.info.description = 'd' m.info.meta = {'a': 1} t = QTable([m]) for item in ([1, 3], np.array([0, 2]), slice(1, 3)): t2 = t[item] m2 = m[item] assert_table_name_col_equal(t2, 'm', m[item]) for attr in attrs: assert getattr(t2['m'].info, attr) == getattr(m.info, attr) assert getattr(m2.info, attr) == getattr(m.info, attr) def test_info_preserved_pickle_copy_init(mixin_cols): """ Test copy, pickle, and init from class roundtrip preserve info. This tests not only the mixin classes but a regular column as well. """ def pickle_roundtrip(c): return pickle.loads(pickle.dumps(c)) def init_from_class(c): return c.__class__(c) attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta') for colname in ('i', 'm'): m = mixin_cols[colname] m.info.name = colname m.info.format = '{0}' m.info.description = 'd' m.info.meta = {'a': 1} for func in (copy.copy, copy.deepcopy, pickle_roundtrip, init_from_class): m2 = func(m) for attr in attrs: # non-native byteorder not preserved by last 2 func, _except_ for structured dtype if (attr != 'dtype' or getattr(m.info.dtype, 'isnative', True) or m.info.dtype.name.startswith('void') or func in (copy.copy, copy.deepcopy)): original = getattr(m.info, attr) else: # func does not preserve byteorder, check against (native) type. original = m.info.dtype.newbyteorder('=') assert getattr(m2.info, attr) == original def test_add_column(mixin_cols): """ Test that adding a column preserves values and attributes """ attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta') m = mixin_cols['m'] assert m.info.name is None # Make sure adding column in various ways doesn't touch t = QTable([m], names=['a']) assert m.info.name is None t['new'] = m assert m.info.name is None m.info.name = 'm' m.info.format = '{0}' m.info.description = 'd' m.info.meta = {'a': 1} t = QTable([m]) # Add columns m2, m3, m4 by two different methods and test expected equality t['m2'] = m m.info.name = 'm3' t.add_columns([m], copy=True) m.info.name = 'm4' t.add_columns([m], copy=False) for name in ('m2', 'm3', 'm4'): assert_table_name_col_equal(t, name, m) for attr in attrs: if attr != 'name': assert getattr(t['m'].info, attr) == getattr(t[name].info, attr) # Also check that one can set using a scalar. s = m[0] if type(s) is type(m) and 'info' in s.__dict__: # We're not going to worry about testing classes for which scalars # are a different class than the real array, or where info is not copied. t['s'] = m[0] assert_table_name_col_equal(t, 's', m[0]) for attr in attrs: if attr != 'name': assert getattr(t['m'].info, attr) == getattr(t['s'].info, attr) # While we're add it, also check a length-1 table. 
t = QTable([m[1:2]], names=['m']) if type(s) is type(m) and 'info' in s.__dict__: t['s'] = m[0] assert_table_name_col_equal(t, 's', m[0]) for attr in attrs: if attr != 'name': assert getattr(t['m'].info, attr) == getattr(t['s'].info, attr) def test_vstack(): """ Vstack tables with mixin cols. """ t1 = QTable(MIXIN_COLS) t2 = QTable(MIXIN_COLS) with pytest.raises(NotImplementedError): vstack([t1, t2]) def test_insert_row(mixin_cols): """ Test inserting a row, which works for Column, Quantity, Time and SkyCoord. """ t = QTable(mixin_cols) t0 = t.copy() t['m'].info.description = 'd' idxs = [0, -1, 1, 2, 3] if isinstance(t['m'], (u.Quantity, Column, time.Time, time.TimeDelta, coordinates.SkyCoord)): t.insert_row(1, t[-1]) for name in t.colnames: col = t[name] if isinstance(col, coordinates.SkyCoord): assert skycoord_equal(col, t0[name][idxs]) else: assert np.all(col == t0[name][idxs]) assert t['m'].info.description == 'd' else: with pytest.raises(ValueError) as exc: t.insert_row(1, t[-1]) assert "Unable to insert row" in str(exc.value) def test_insert_row_bad_unit(): """ Insert a row into a QTable with the wrong unit """ t = QTable([[1] * u.m]) with pytest.raises(ValueError) as exc: t.insert_row(0, (2 * u.m / u.s,)) assert "'m / s' (speed/velocity) and 'm' (length) are not convertible" in str(exc.value) def test_convert_np_array(mixin_cols): """ Test that converting to numpy array creates an object dtype and that each instance in the array has the expected type. """ t = QTable(mixin_cols) ta = t.as_array() m = mixin_cols['m'] dtype_kind = m.dtype.kind if hasattr(m, 'dtype') else 'O' assert ta['m'].dtype.kind == dtype_kind def test_assignment_and_copy(): """ Test that assignment of an int, slice, and fancy index works. Along the way test that copying table works. """ for name in ('quantity', 'arraywrap'): m = MIXIN_COLS[name] t0 = QTable([m], names=['m']) for i0, i1 in ((1, 2), (slice(0, 2), slice(1, 3)), (np.array([1, 2]), np.array([2, 3]))): t = t0.copy() t['m'][i0] = m[i1] if name == 'arraywrap': assert np.all(t['m'].data[i0] == m.data[i1]) assert np.all(t0['m'].data[i0] == m.data[i0]) assert np.all(t0['m'].data[i0] != t['m'].data[i0]) else: assert np.all(t['m'][i0] == m[i1]) assert np.all(t0['m'][i0] == m[i0]) assert np.all(t0['m'][i0] != t['m'][i0]) def test_conversion_qtable_table(): """ Test that a table round trips from QTable => Table => QTable """ qt = QTable(MIXIN_COLS) names = qt.colnames for name in names: qt[name].info.description = name t = Table(qt) for name in names: assert t[name].info.description == name if name == 'quantity': assert np.all(t['quantity'] == qt['quantity'].value) assert np.all(t['quantity'].unit is qt['quantity'].unit) assert isinstance(t['quantity'], t.ColumnClass) else: assert_table_name_col_equal(t, name, qt[name]) qt2 = QTable(qt) for name in names: assert qt2[name].info.description == name assert_table_name_col_equal(qt2, name, qt[name]) def test_setitem_as_column_name(): """ Test for mixin-related regression described in #3321. """ t = Table() t['a'] = ['x', 'y'] t['b'] = 'b' # Previously was failing with KeyError assert np.all(t['a'] == ['x', 'y']) assert np.all(t['b'] == ['b', 'b']) def test_quantity_representation(): """ Test that table representation of quantities does not have unit """ t = QTable([[1, 2] * u.m]) assert t.pformat() == ['col0', ' m ', '----', ' 1.0', ' 2.0'] def test_representation_representation(): """ Test that Representations are represented correctly. 
""" # With no unit we get "None" in the unit row c = coordinates.CartesianRepresentation([0], [1], [0], unit=u.one) t = Table([c]) assert t.pformat() == [' col0 ', '------------', '(0., 1., 0.)'] c = coordinates.CartesianRepresentation([0], [1], [0], unit='m') t = Table([c]) assert t.pformat() == [' col0 ', ' m ', '------------', '(0., 1., 0.)'] c = coordinates.SphericalRepresentation([10]*u.deg, [20]*u.deg, [1]*u.pc) t = Table([c]) assert t.pformat() == [' col0 ', ' deg, deg, pc ', '--------------', '(10., 20., 1.)'] c = coordinates.UnitSphericalRepresentation([10]*u.deg, [20]*u.deg) t = Table([c]) assert t.pformat() == [' col0 ', ' deg ', '----------', '(10., 20.)'] c = coordinates.SphericalCosLatDifferential( [10]*u.mas/u.yr, [2]*u.mas/u.yr, [10]*u.km/u.s) t = Table([c]) assert t.pformat() == [' col0 ', 'mas / yr, mas / yr, km / s', '--------------------------', ' (10., 2., 10.)'] def test_skycoord_representation(): """ Test that skycoord representation works, both in the way that the values are output and in changing the frame representation. """ # With no unit we get "None" in the unit row c = coordinates.SkyCoord([0], [1], [0], representation_type='cartesian') t = Table([c]) assert t.pformat() == [' col0 ', 'None,None,None', '--------------', ' 0.0,1.0,0.0'] # Test that info works with a dynamically changed representation c = coordinates.SkyCoord([0], [1], [0], unit='m', representation_type='cartesian') t = Table([c]) assert t.pformat() == [' col0 ', ' m,m,m ', '-----------', '0.0,1.0,0.0'] t['col0'].representation_type = 'unitspherical' assert t.pformat() == [' col0 ', 'deg,deg ', '--------', '90.0,0.0'] t['col0'].representation_type = 'cylindrical' assert t.pformat() == [' col0 ', ' m,deg,m ', '------------', '1.0,90.0,0.0'] def test_ndarray_mixin(): """ Test directly adding a plain structured array into a table instead of the view as an NdarrayMixin. Once added as an NdarrayMixin then all the previous tests apply. """ a = np.array([(1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')], dtype='<i4,' + ('|U1')) b = np.array([(10, 'aa'), (20, 'bb'), (30, 'cc'), (40, 'dd')], dtype=[('x', 'i4'), ('y', ('U2'))]) c = np.rec.fromrecords([(100., 'raa'), (200., 'rbb'), (300., 'rcc'), (400., 'rdd')], names=['rx', 'ry']) d = np.arange(8, dtype='i8').reshape(4, 2).view(NdarrayMixin) # Add one during initialization and the next as a new column. t = Table([a], names=['a']) t['b'] = b t['c'] = c t['d'] = d assert isinstance(t['a'], NdarrayMixin) assert t['a'][1][1] == a[1][1] assert t['a'][2][0] == a[2][0] assert t[1]['a'][1] == a[1][1] assert t[2]['a'][0] == a[2][0] assert isinstance(t['b'], NdarrayMixin) assert t['b'][1]['x'] == b[1]['x'] assert t['b'][1]['y'] == b[1]['y'] assert t[1]['b']['x'] == b[1]['x'] assert t[1]['b']['y'] == b[1]['y'] assert isinstance(t['c'], NdarrayMixin) assert t['c'][1]['rx'] == c[1]['rx'] assert t['c'][1]['ry'] == c[1]['ry'] assert t[1]['c']['rx'] == c[1]['rx'] assert t[1]['c']['ry'] == c[1]['ry'] assert isinstance(t['d'], NdarrayMixin) assert t['d'][1][0] == d[1][0] assert t['d'][1][1] == d[1][1] assert t[1]['d'][0] == d[1][0] assert t[1]['d'][1] == d[1][1] assert t.pformat(show_dtype=True) == [ ' a [f0, f1] b [x, y] c [rx, ry] d ', '(int32, str1) (int32, str2) (float64, str3) int64[2]', '------------- ------------- --------------- --------', " (1, 'a') (10, 'aa') (100., 'raa') 0 .. 1", " (2, 'b') (20, 'bb') (200., 'rbb') 2 .. 3", " (3, 'c') (30, 'cc') (300., 'rcc') 4 .. 5", " (4, 'd') (40, 'dd') (400., 'rdd') 6 .. 
7"] def test_possible_string_format_functions(): """ The QuantityInfo info class for Quantity implements a possible_string_format_functions() method that overrides the standard pprint._possible_string_format_functions() function. Test this. """ t = QTable([[1, 2] * u.m]) t['col0'].info.format = '%.3f' assert t.pformat() == [' col0', ' m ', '-----', '1.000', '2.000'] t['col0'].info.format = 'hi {:.3f}' assert t.pformat() == [' col0 ', ' m ', '--------', 'hi 1.000', 'hi 2.000'] t['col0'].info.format = '.4f' assert t.pformat() == [' col0 ', ' m ', '------', '1.0000', '2.0000'] def test_rename_mixin_columns(mixin_cols): """ Rename a mixin column. """ t = QTable(mixin_cols) tc = t.copy() t.rename_column('m', 'mm') assert t.colnames == ['i', 'a', 'b', 'mm'] if isinstance(t['mm'], table_helpers.ArrayWrapper): assert np.all(t['mm'].data == tc['m'].data) elif isinstance(t['mm'], coordinates.SkyCoord): assert np.all(t['mm'].ra == tc['m'].ra) assert np.all(t['mm'].dec == tc['m'].dec) elif isinstance(t['mm'], coordinates.BaseRepresentationOrDifferential): assert np.all(representation_equal(t['mm'], tc['m'])) else: assert np.all(t['mm'] == tc['m']) def test_represent_mixins_as_columns_unit_fix(): """ If the unit is invalid for a column that gets serialized this would cause an exception. Fixed in #7481. """ t = Table({'a': [1, 2]}, masked=True) t['a'].unit = 'not a valid unit' t['a'].mask[1] = True serialize.represent_mixins_as_columns(t) def test_primary_data_column_gets_description(): """ If the mixin defines a primary data column, that should get the description, format, etc., so no __info__ should be needed. """ t = QTable({'a': [1, 2] * u.m}) t['a'].info.description = 'parrot' t['a'].info.format = '7.2f' tser = serialize.represent_mixins_as_columns(t) assert '__info__' not in tser.meta['__serialized_columns__']['a'] assert tser['a'].format == '7.2f' assert tser['a'].description == 'parrot' def test_skycoord_with_velocity(): # Regression test for gh-6447 sc = SkyCoord([1], [2], unit='deg', galcen_v_sun=None) t = Table([sc]) s = StringIO() t.write(s, format='ascii.ecsv', overwrite=True) s.seek(0) t2 = Table.read(s.read(), format='ascii.ecsv') assert skycoord_equal(t2['col0'], sc) @pytest.mark.parametrize('table_cls', [Table, QTable]) def test_ensure_input_info_is_unchanged(table_cls): """If a mixin input to a table has no info, it should stay that way. This since having 'info' slows down slicing, etc. See gh-11066. """ q = [1, 2] * u.m assert 'info' not in q.__dict__ t = table_cls([q], names=['q']) assert 'info' not in q.__dict__ t = table_cls([q]) assert 'info' not in q.__dict__ t = table_cls({'q': q}) assert 'info' not in q.__dict__ t['q2'] = q assert 'info' not in q.__dict__ sc = SkyCoord([1, 2], [2, 3], unit='deg') t['sc'] = sc assert 'info' not in sc.__dict__ def test_bad_info_class(): """Make a mixin column class that does not trigger the machinery to generate a pure column representation""" class MyArrayWrapper(ArrayWrapper): info = ParentDtypeInfo() t = Table() t['tm'] = MyArrayWrapper([0, 1, 2]) out = StringIO() match = r"failed to represent column 'tm' \(MyArrayWrapper\) as one or more Column subclasses" with pytest.raises(TypeError, match=match): represent_mixins_as_columns(t)
# This Python file uses the following encoding: utf-8 # Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest import numpy as np from io import StringIO from astropy import table from astropy.io import ascii from astropy.table import Table, QTable from astropy.table.table_helpers import simple_table from astropy import units as u from astropy.utils import console BIG_WIDE_ARR = np.arange(2000, dtype=np.float64).reshape(100, 20) SMALL_ARR = np.arange(18, dtype=np.int64).reshape(6, 3) @pytest.mark.usefixtures('table_type') class TestMultiD(): def test_multidim(self, table_type): """Test printing with multidimensional column""" arr = [np.array([[1, 2], [10, 20]], dtype=np.int64), np.array([[3, 4], [30, 40]], dtype=np.int64), np.array([[5, 6], [50, 60]], dtype=np.int64)] t = table_type(arr) lines = t.pformat(show_dtype=True) assert lines == [' col0 col1 col2 ', 'int64[2] int64[2] int64[2]', '-------- -------- --------', ' 1 .. 2 3 .. 4 5 .. 6', '10 .. 20 30 .. 40 50 .. 60'] lines = t.pformat(html=True, show_dtype=True) assert lines == [ f'<table id="table{id(t)}">', '<thead><tr><th>col0</th><th>col1</th><th>col2</th></tr></thead>', '<thead><tr><th>int64[2]</th><th>int64[2]</th><th>int64[2]</th></tr></thead>', '<tr><td>1 .. 2</td><td>3 .. 4</td><td>5 .. 6</td></tr>', '<tr><td>10 .. 20</td><td>30 .. 40</td><td>50 .. 60</td></tr>', '</table>'] nbclass = table.conf.default_notebook_table_class masked = 'masked=True ' if t.masked else '' assert t._repr_html_().splitlines() == [ f'<div><i>{table_type.__name__} {masked}length=2</i>', f'<table id="table{id(t)}" class="{nbclass}">', '<thead><tr><th>col0</th><th>col1</th><th>col2</th></tr></thead>', '<thead><tr><th>int64[2]</th><th>int64[2]</th><th>int64[2]</th></tr></thead>', '<tr><td>1 .. 2</td><td>3 .. 4</td><td>5 .. 6</td></tr>', '<tr><td>10 .. 20</td><td>30 .. 40</td><td>50 .. 60</td></tr>', '</table></div>'] t = table_type([arr]) lines = t.pformat(show_dtype=True) assert lines == [' col0 ', 'int64[2,2]', '----------', ' 1 .. 20', ' 3 .. 40', ' 5 .. 60'] def test_fake_multidim(self, table_type): """Test printing with 'fake' multidimensional column""" arr = [np.array([[(1,)], [(10,)]], dtype=np.int64), np.array([[(3,)], [(30,)]], dtype=np.int64), np.array([[(5,)], [(50,)]], dtype=np.int64)] t = table_type(arr) lines = t.pformat(show_dtype=True) assert lines == [ " col0 col1 col2 ", "int64[1,1] int64[1,1] int64[1,1]", "---------- ---------- ----------", " 1 3 5", " 10 30 50"] lines = t.pformat(html=True, show_dtype=True) assert lines == [ f'<table id="table{id(t)}">', '<thead><tr><th>col0</th><th>col1</th><th>col2</th></tr></thead>', '<thead><tr><th>int64[1,1]</th><th>int64[1,1]</th><th>int64[1,1]</th></tr></thead>', '<tr><td>1</td><td>3</td><td>5</td></tr>', '<tr><td>10</td><td>30</td><td>50</td></tr>', '</table>'] nbclass = table.conf.default_notebook_table_class masked = 'masked=True ' if t.masked else '' assert t._repr_html_().splitlines() == [ f'<div><i>{table_type.__name__} {masked}length=2</i>', f'<table id="table{id(t)}" class="{nbclass}">', '<thead><tr><th>col0</th><th>col1</th><th>col2</th></tr></thead>', '<thead><tr><th>int64[1,1]</th><th>int64[1,1]</th><th>int64[1,1]</th></tr></thead>', '<tr><td>1</td><td>3</td><td>5</td></tr>', '<tr><td>10</td><td>30</td><td>50</td></tr>', '</table></div>'] t = table_type([arr]) lines = t.pformat(show_dtype=True) assert lines == [' col0 ', 'int64[2,1,1]', '------------', ' 1 .. 10', ' 3 .. 30', ' 5 .. 
50']


def test_html_escaping():
    t = table.Table([('<script>alert("gotcha");</script>', 2, 3)])
    nbclass = table.conf.default_notebook_table_class
    assert t._repr_html_().splitlines() == [
        '<div><i>Table length=3</i>',
        f'<table id="table{id(t)}" class="{nbclass}">',
        '<thead><tr><th>col0</th></tr></thead>',
        '<thead><tr><th>str33</th></tr></thead>',
        '<tr><td>&lt;script&gt;alert(&quot;gotcha&quot;);&lt;/script&gt;</td></tr>',
        '<tr><td>2</td></tr>',
        '<tr><td>3</td></tr>',
        '</table></div>']


@pytest.mark.usefixtures('table_type')
class TestPprint():

    def _setup(self, table_type):
        self.tb = table_type(BIG_WIDE_ARR)
        self.tb['col0'].format = 'e'
        self.tb['col1'].format = '.6f'
        self.tb['col0'].unit = 'km**2'
        self.tb['col19'].unit = 'kg s m**-2'
        self.ts = table_type(SMALL_ARR)

    def test_empty_table(self, table_type):
        t = table_type()
        lines = t.pformat()
        assert lines == ['<No columns>']
        c = repr(t)
        masked = 'masked=True ' if t.masked else ''
        assert c.splitlines() == [f'<{table_type.__name__} {masked}length=0>', '<No columns>']

    def test_format0(self, table_type):
        """Try getting the screen size, but fall back to the defaults because
        the test environment has no screen access (fcntl.ioctl fails).
        """
        self._setup(table_type)
        arr = np.arange(4000, dtype=np.float64).reshape(100, 40)
        lines = table_type(arr).pformat()
        nlines, width = console.terminal_size()
        assert len(lines) == nlines
        for line in lines[:-1]:  # skip last "Length = .. rows" line
            assert width - 10 < len(line) <= width

    def test_format1(self, table_type):
        """Basic test of formatting, unit header row included"""
        self._setup(table_type)
        lines = self.tb.pformat(max_lines=8, max_width=40)
        assert lines == [' col0 col1 ... col19 ', ' km2 ... kg s / m2', '------------ ----------- ... ---------', '0.000000e+00 1.000000 ... 19.0', ' ... ... ... ...', '1.960000e+03 1961.000000 ... 1979.0', '1.980000e+03 1981.000000 ... 1999.0', 'Length = 100 rows']

    def test_format2(self, table_type):
        """Basic test of formatting, unit header row excluded"""
        self._setup(table_type)
        lines = self.tb.pformat(max_lines=8, max_width=40, show_unit=False)
        assert lines == [' col0 col1 ... col19 ', '------------ ----------- ... ------', '0.000000e+00 1.000000 ... 19.0', '2.000000e+01 21.000000 ... 39.0', ' ... ... ... ...', '1.960000e+03 1961.000000 ... 1979.0', '1.980000e+03 1981.000000 ... 1999.0', 'Length = 100 rows']

    def test_format3(self, table_type):
        """Include the unit header row"""
        self._setup(table_type)
        lines = self.tb.pformat(max_lines=8, max_width=40, show_unit=True)
        assert lines == [' col0 col1 ... col19 ', ' km2 ... kg s / m2', '------------ ----------- ... ---------', '0.000000e+00 1.000000 ... 19.0', ' ... ... ... ...', '1.960000e+03 1961.000000 ... 1979.0', '1.980000e+03 1981.000000 ... 1999.0', 'Length = 100 rows']

    def test_format4(self, table_type):
        """Do not include the name header row"""
        self._setup(table_type)
        lines = self.tb.pformat(max_lines=8, max_width=40, show_name=False)
        assert lines == [' km2 ... kg s / m2', '------------ ----------- ... ---------', '0.000000e+00 1.000000 ... 19.0', '2.000000e+01 21.000000 ... 39.0', ' ... ... ... ...', '1.960000e+03 1961.000000 ... 1979.0', '1.980000e+03 1981.000000 ... 1999.0', 'Length = 100 rows']

    def test_noclip(self, table_type):
        """Basic table print"""
        self._setup(table_type)
        lines = self.ts.pformat(max_lines=-1, max_width=-1)
        assert lines == ['col0 col1 col2', '---- ---- ----', ' 0 1 2', ' 3 4 5', ' 6 7 8', ' 9 10 11', ' 12 13 14', ' 15 16 17']

    def test_clip1(self, table_type):
        """max lines below hard limit of 8 """
        self._setup(table_type)
        lines = self.ts.pformat(max_lines=3, max_width=-1)
        assert lines == ['col0 col1 col2', '---- ---- ----', ' 0 1 2', ' 3 4 5', ' 6 7 8', ' 9 10 11', ' 12 13 14', ' 15 16 17']

    def test_clip2(self, table_type):
        """max lines below hard limit of 8 and output longer than 8 """
        self._setup(table_type)
        lines = self.ts.pformat(max_lines=3, max_width=-1, show_unit=True, show_dtype=True)
        assert lines == [' col0 col1 col2', ' ', 'int64 int64 int64', '----- ----- -----', ' 0 1 2', ' ... ... ...', ' 15 16 17', 'Length = 6 rows']

    def test_clip3(self, table_type):
        """Max lines below hard limit of 8 and max width below hard limit of 10 """
        self._setup(table_type)
        lines = self.ts.pformat(max_lines=3, max_width=1, show_unit=True)
        assert lines == ['col0 ...', ' ...', '---- ...', ' 0 ...', ' ... ...', ' 12 ...', ' 15 ...', 'Length = 6 rows']

    def test_clip4(self, table_type):
        """Test a range of max_lines"""
        self._setup(table_type)
        for max_lines in (0, 1, 4, 5, 6, 7, 8, 100, 101, 102, 103, 104, 130):
            lines = self.tb.pformat(max_lines=max_lines, show_unit=False)
            assert len(lines) == max(8, min(102, max_lines))

    def test_pformat_all(self, table_type):
        """Test that all rows are printed by default"""
        self._setup(table_type)
        lines = self.tb.pformat_all()
        # +3 accounts for the three header lines in this table
        assert len(lines) == BIG_WIDE_ARR.shape[0] + 3

    def test_pprint_all(self, table_type, capsys):
        """Test that all rows are printed by default"""
        self._setup(table_type)
        self.tb.pprint_all()
        (out, err) = capsys.readouterr()
        # +3 accounts for the three header lines in this table
        assert len(out.splitlines()) == BIG_WIDE_ARR.shape[0] + 3


@pytest.mark.usefixtures('table_type')
class TestFormat():

    def test_column_format(self, table_type):
        t = table_type([[1, 2], [3, 4]], names=('a', 'b'))
        # default (format=None)
        assert str(t['a']) == ' a \n---\n 1\n 2'

        # just a plain format string
        t['a'].format = '5.2f'
        assert str(t['a']) == ' a \n-----\n 1.00\n 2.00'

        # Old-style that is almost new-style
        t['a'].format = '{ %4.2f }'
        assert str(t['a']) == ' a \n--------\n{ 1.00 }\n{ 2.00 }'

        # New-style that is almost old-style
        t['a'].format = '%{0:}'
        assert str(t['a']) == ' a \n---\n %1\n %2'

        # New-style with extra spaces
        t['a'].format = ' {0:05d} '
        assert str(t['a']) == ' a \n-------\n 00001 \n 00002 '

        # New-style has precedence
        t['a'].format = '%4.2f {0:}'
        assert str(t['a']) == ' a \n-------\n%4.2f 1\n%4.2f 2'

        # Invalid format spec
        with pytest.raises(ValueError):
            t['a'].format = 'fail'
        assert t['a'].format == '%4.2f {0:}'  # format did not change

    def test_column_format_with_threshold(self, table_type):
        from astropy import conf
        with conf.set_temp('max_lines', 8):
            t = table_type([np.arange(20)], names=['a'])
            t['a'].format = '%{0:}'
            assert str(t['a']).splitlines() == [' a ', '---', ' %0', ' %1', '...', '%18', '%19', 'Length = 20 rows']
            t['a'].format = '{ %4.2f }'
            assert str(t['a']).splitlines() == [' a ', '---------', ' { 0.00 }', ' { 1.00 }', ' ...', '{ 18.00 }', '{ 19.00 }', 'Length = 20 rows']

    def test_column_format_func(self, table_type):
        # run most of functions twice
        # 1) astropy.table.pprint._format_funcs gets populated
        # 2) astropy.table.pprint._format_funcs 
gets used t = table_type([[1., 2.], [3, 4]], names=('a', 'b')) # mathematical function t['a'].format = lambda x: str(x * 3.) assert str(t['a']) == ' a \n---\n3.0\n6.0' assert str(t['a']) == ' a \n---\n3.0\n6.0' def test_column_format_callable(self, table_type): # run most of functions twice # 1) astropy.table.pprint._format_funcs gets populated # 2) astropy.table.pprint._format_funcs gets used t = table_type([[1., 2.], [3, 4]], names=('a', 'b')) # mathematical function class format: def __call__(self, x): return str(x * 3.) t['a'].format = format() assert str(t['a']) == ' a \n---\n3.0\n6.0' assert str(t['a']) == ' a \n---\n3.0\n6.0' def test_column_format_func_wrong_number_args(self, table_type): t = table_type([[1., 2.], [3, 4]], names=('a', 'b')) # function that expects wrong number of arguments def func(a, b): pass with pytest.raises(ValueError): t['a'].format = func def test_column_format_func_multiD(self, table_type): arr = [np.array([[1, 2], [10, 20]], dtype='i8')] t = table_type(arr, names=['a']) # mathematical function t['a'].format = lambda x: str(x * 3.) outstr = (' a \n' '------------\n' ' 3.0 .. 6.0\n' '30.0 .. 60.0') assert str(t['a']) == outstr def test_column_format_func_not_str(self, table_type): t = table_type([[1., 2.], [3, 4]], names=('a', 'b')) # mathematical function with pytest.raises(ValueError): t['a'].format = lambda x: x * 3 def test_column_alignment(self, table_type): t = table_type([[1], [2], [3], [4]], names=('long title a', 'long title b', 'long title c', 'long title d')) t['long title a'].format = '<' t['long title b'].format = '^' t['long title c'].format = '>' t['long title d'].format = '0=' assert str(t['long title a']) == 'long title a\n------------\n1 ' assert str(t['long title b']) == 'long title b\n------------\n 2 ' assert str(t['long title c']) == 'long title c\n------------\n 3' assert str(t['long title d']) == 'long title d\n------------\n000000000004' class TestFormatWithMaskedElements(): def test_column_format(self): t = Table([[1, 2, 3], [3, 4, 5]], names=('a', 'b'), masked=True) t['a'].mask = [True, False, True] # default (format=None) assert str(t['a']) == ' a \n---\n --\n 2\n --' # just a plain format string t['a'].format = '5.2f' assert str(t['a']) == ' a \n-----\n --\n 2.00\n --' # Old-style that is almost new-style t['a'].format = '{ %4.2f }' assert str(t['a']) == ' a \n--------\n --\n{ 2.00 }\n --' # New-style that is almost old-style t['a'].format = '%{0:}' assert str(t['a']) == ' a \n---\n --\n %2\n --' # New-style with extra spaces t['a'].format = ' {0:05d} ' assert str(t['a']) == ' a \n-------\n --\n 00002 \n --' # New-style has precedence t['a'].format = '%4.2f {0:}' assert str(t['a']) == ' a \n-------\n --\n%4.2f 2\n --' def test_column_format_with_threshold_masked_table(self): from astropy import conf with conf.set_temp('max_lines', 8): t = Table([np.arange(20)], names=['a'], masked=True) t['a'].format = '%{0:}' t['a'].mask[0] = True t['a'].mask[-1] = True assert str(t['a']).splitlines() == [' a ', '---', ' --', ' %1', '...', '%18', ' --', 'Length = 20 rows'] t['a'].format = '{ %4.2f }' assert str(t['a']).splitlines() == [' a ', '---------', ' --', ' { 1.00 }', ' ...', '{ 18.00 }', ' --', 'Length = 20 rows'] def test_column_format_func(self): # run most of functions twice # 1) astropy.table.pprint._format_funcs gets populated # 2) astropy.table.pprint._format_funcs gets used t = Table([[1., 2., 3.], [3, 4, 5]], names=('a', 'b'), masked=True) t['a'].mask = [True, False, True] # mathematical function t['a'].format = lambda x: str(x * 
3.) assert str(t['a']) == ' a \n---\n --\n6.0\n --' assert str(t['a']) == ' a \n---\n --\n6.0\n --' def test_column_format_func_with_special_masked(self): # run most of functions twice # 1) astropy.table.pprint._format_funcs gets populated # 2) astropy.table.pprint._format_funcs gets used t = Table([[1., 2., 3.], [3, 4, 5]], names=('a', 'b'), masked=True) t['a'].mask = [True, False, True] # mathematical function def format_func(x): if x is np.ma.masked: return '!!' else: return str(x * 3.) t['a'].format = format_func assert str(t['a']) == ' a \n---\n !!\n6.0\n !!' assert str(t['a']) == ' a \n---\n !!\n6.0\n !!' def test_column_format_callable(self): # run most of functions twice # 1) astropy.table.pprint._format_funcs gets populated # 2) astropy.table.pprint._format_funcs gets used t = Table([[1., 2., 3.], [3, 4, 5]], names=('a', 'b'), masked=True) t['a'].mask = [True, False, True] # mathematical function class format: def __call__(self, x): return str(x * 3.) t['a'].format = format() assert str(t['a']) == ' a \n---\n --\n6.0\n --' assert str(t['a']) == ' a \n---\n --\n6.0\n --' def test_column_format_func_wrong_number_args(self): t = Table([[1., 2.], [3, 4]], names=('a', 'b'), masked=True) t['a'].mask = [True, False] # function that expects wrong number of arguments def func(a, b): pass with pytest.raises(ValueError): t['a'].format = func # but if all are masked, it never gets called t['a'].mask = [True, True] assert str(t['a']) == ' a \n---\n --\n --' def test_column_format_func_multiD(self): arr = [np.array([[1, 2], [10, 20]], dtype='i8')] t = Table(arr, names=['a'], masked=True) t['a'].mask[0, 1] = True t['a'].mask[1, 1] = True # mathematical function t['a'].format = lambda x: str(x * 3.) outstr = (' a \n' '----------\n' ' 3.0 .. --\n' '30.0 .. --') assert str(t['a']) == outstr assert str(t['a']) == outstr def test_pprint_npfloat32(): """ Test for #148, that np.float32 cannot by itself be formatted as float, but has to be converted to a python float. """ dat = np.array([1., 2.], dtype=np.float32) t = Table([dat], names=['a']) t['a'].format = '5.2f' assert str(t['a']) == ' a \n-----\n 1.00\n 2.00' def test_pprint_py3_bytes(): """ Test for #1346 and #4944. Make sure a bytestring (dtype=S<N>) in Python 3 is printed correctly (without the "b" prefix like b'string'). 
""" val = bytes('val', encoding='utf-8') blah = 'bläh'.encode('utf-8') dat = np.array([val, blah], dtype=[('col', 'S10')]) t = table.Table(dat) assert t['col'].pformat() == ['col ', '----', ' val', 'bläh'] def test_pprint_structured(): su = table.Column([(1, (1.5, [1.6, 1.7])), (2, (2.5, [2.6, 2.7]))], name='su', dtype=[('i', np.int64), ('f', [('p0', np.float64), ('p1', np.float64, (2,))])]) assert su.pformat() == [ " su [i, f[p0, p1]] ", "----------------------", "(1, (1.5, [1.6, 1.7]))", "(2, (2.5, [2.6, 2.7]))"] t = table.Table([su]) assert t.pformat() == su.pformat() assert repr(t).splitlines() == [ "<Table length=2>", " su [i, f[p0, p1]] ", "(int64, (float64, float64[2]))", "------------------------------", " (1, (1.5, [1.6, 1.7]))", " (2, (2.5, [2.6, 2.7]))"] def test_pprint_structured_with_format(): dtype = np.dtype([('par', 'f8'), ('min', 'f8'), ('id', 'i4'), ('name', 'U4')]) c = table.Column([(1.2345678, -20, 3, 'bar'), (12.345678, 4.5678, 33, 'foo')], dtype=dtype) t = table.Table() t['a'] = [1, 2] t['c'] = c t['c'].info.format = '{par:6.2f} {min:5.1f} {id:03d} {name:4s}' exp = [ ' a c [par, min, id, name]', '--- ----------------------', ' 1 1.23 -20.0 003 bar ', ' 2 12.35 4.6 033 foo '] assert t.pformat_all() == exp def test_pprint_nameless_col(): """Regression test for #2213, making sure a nameless column can be printed using None as the name. """ col = table.Column([1., 2.]) assert str(col).startswith('None') def test_html(): """Test HTML printing""" dat = np.array([1., 2.], dtype=np.float32) t = Table([dat], names=['a']) lines = t.pformat(html=True) assert lines == [f'<table id="table{id(t)}">', '<thead><tr><th>a</th></tr></thead>', '<tr><td>1.0</td></tr>', '<tr><td>2.0</td></tr>', '</table>'] lines = t.pformat(html=True, tableclass='table-striped') assert lines == [ f'<table id="table{id(t)}" class="table-striped">', '<thead><tr><th>a</th></tr></thead>', '<tr><td>1.0</td></tr>', '<tr><td>2.0</td></tr>', '</table>'] lines = t.pformat(html=True, tableclass=['table', 'table-striped']) assert lines == [ f'<table id="table{id(t)}" class="table table-striped">', '<thead><tr><th>a</th></tr></thead>', '<tr><td>1.0</td></tr>', '<tr><td>2.0</td></tr>', '</table>'] def test_align(): t = simple_table(2, kinds='iS') assert t.pformat() == [' a b ', '--- ---', ' 1 b', ' 2 c'] # Use column format attribute t['a'].format = '<' assert t.pformat() == [' a b ', '--- ---', '1 b', '2 c'] # Now override column format attribute with various combinations of align tpf = [' a b ', '--- ---', ' 1 b ', ' 2 c '] for align in ('^', ['^', '^'], ('^', '^')): assert tpf == t.pformat(align=align) assert t.pformat(align='<') == [' a b ', '--- ---', '1 b ', '2 c '] assert t.pformat(align='0=') == [' a b ', '--- ---', '001 00b', '002 00c'] assert t.pformat(align=['<', '^']) == [' a b ', '--- ---', '1 b ', '2 c '] # Now use fill characters. Stress the system using a fill # character that is the same as an align character. t = simple_table(2, kinds='iS') assert t.pformat(align='^^') == [' a b ', '--- ---', '^1^ ^b^', '^2^ ^c^'] assert t.pformat(align='^>') == [' a b ', '--- ---', '^^1 ^^b', '^^2 ^^c'] assert t.pformat(align='^<') == [' a b ', '--- ---', '1^^ b^^', '2^^ c^^'] # Complicated interaction (same as narrative docs example) t1 = Table([[1.0, 2.0], [1, 2]], names=['column1', 'column2']) t1['column1'].format = '#^.2f' assert t1.pformat() == ['column1 column2', '------- -------', '##1.00# 1', '##2.00# 2'] assert t1.pformat(align='!<') == ['column1 column2', '------- -------', '1.00!!! 1!!!!!!', '2.00!!! 
2!!!!!!'] assert t1.pformat(align=[None, '!<']) == ['column1 column2', '------- -------', '##1.00# 1!!!!!!', '##2.00# 2!!!!!!'] # Zero fill t['a'].format = '+d' assert t.pformat(align='0=') == [' a b ', '--- ---', '+01 00b', '+02 00c'] with pytest.raises(ValueError): t.pformat(align=['fail']) with pytest.raises(TypeError): t.pformat(align=0) with pytest.raises(TypeError): t.pprint(align=0) # Make sure pprint() does not raise an exception t.pprint() with pytest.raises(ValueError): t.pprint(align=['<', '<', '<']) with pytest.raises(ValueError): t.pprint(align='x=') def test_auto_format_func(): """Test for #5802 (fix for #5800 where format_func key is not unique)""" t = Table([[1, 2] * u.m]) t['col0'].format = '%f' t.pformat() # Force caching of format function qt = QTable(t) qt.pformat() # Generates exception prior to #5802 def test_decode_replace(): """ Test printing a bytestring column with a value that fails decoding to utf-8 and gets replaced by U+FFFD. See https://docs.python.org/3/library/codecs.html#codecs.replace_errors """ t = Table([[b'Z\xf0']]) assert t.pformat() == ['col0', '----', ' Z\ufffd'] class TestColumnsShowHide: """Tests of show and hide table columns""" def setup_method(self): self.t = simple_table(size=1, cols=4, kinds='i') @pytest.mark.parametrize('attr', ('pprint_exclude_names', 'pprint_include_names')) def test_basic(self, attr): t = self.t assert repr(getattr(Table, attr)) == f'<PprintIncludeExclude name={attr} default=None>' t_show_hide = getattr(t, attr) assert repr(t_show_hide) == f'<PprintIncludeExclude name={attr} value=None>' # Default value is None assert t_show_hide() is None def test_slice(self): t = self.t t.pprint_include_names = 'a' t.pprint_exclude_names = 'b' t2 = t[0:1] assert t2.pprint_include_names() == ('a',) assert t2.pprint_exclude_names() == ('b',) def test_copy(self): t = self.t t.pprint_include_names = 'a' t.pprint_exclude_names = 'b' t2 = t.copy() assert t2.pprint_include_names() == ('a',) assert t2.pprint_exclude_names() == ('b',) t2.pprint_include_names = 'c' t2.pprint_exclude_names = 'd' assert t.pprint_include_names() == ('a',) assert t.pprint_exclude_names() == ('b',) assert t2.pprint_include_names() == ('c',) assert t2.pprint_exclude_names() == ('d',) @pytest.mark.parametrize('attr', ('pprint_exclude_names', 'pprint_include_names')) @pytest.mark.parametrize('value', ('z', ['a', 'z'])) def test_setting(self, attr, value): t = self.t t_show_hide = getattr(t, attr) # Expected attribute value ('z',) or ('a', 'z') exp = (value,) if isinstance(value, str) else tuple(value) # Context manager, can include column names that do not exist with t_show_hide.set(value): assert t_show_hide() == exp assert t.meta['__attributes__'] == {attr: exp} assert t_show_hide() is None # Setting back to None clears out meta assert t.meta == {} # Do `t.pprint_include_names/hide = value` setattr(t, attr, value) assert t_show_hide() == exp # Clear attribute t_show_hide.set(None) assert t_show_hide() is None # Now use set() method t_show_hide.set(value) assert t_show_hide() == exp with t_show_hide.set(None): assert t_show_hide() is None assert t.meta == {} assert t_show_hide() == exp @pytest.mark.parametrize('attr', ('pprint_exclude_names', 'pprint_include_names')) @pytest.mark.parametrize('value', ('z', ['a', 'z'], ('a', 'z'))) def test_add_remove(self, attr, value): t = self.t t_show_hide = getattr(t, attr) # Expected attribute value ('z') or ('a', 'z') exp = (value,) if isinstance(value, str) else tuple(value) # add() method for str or list of str 
t_show_hide.add(value) assert t_show_hide() == exp # Adding twice has no effect t_show_hide.add(value) assert t_show_hide() == exp # Remove values (str or list of str). Reverts to None if all names are # removed. t_show_hide.remove(value) assert t_show_hide() is None # Remove just one name, possibly leaving a name. t_show_hide.add(value) t_show_hide.remove('z') assert t_show_hide() == (None if value == 'z' else ('a',)) # Cannot remove name not in the list t_show_hide.set(['a', 'z']) with pytest.raises(ValueError, match=f'x not in {attr}'): t_show_hide.remove(('x', 'z')) @pytest.mark.parametrize('attr', ('pprint_exclude_names', 'pprint_include_names')) def test_rename(self, attr): t = self.t t_hide_show = getattr(t, attr) t_hide_show.set(['a', 'b']) t.rename_column('a', 'aa') assert t_hide_show() == ('aa', 'b') @pytest.mark.parametrize('attr', ('pprint_exclude_names', 'pprint_include_names')) def test_remove(self, attr): t = self.t t_hide_show = getattr(t, attr) t_hide_show.set(['a', 'b']) del t['a'] assert t_hide_show() == ('b',) def test_serialization(self): # Serialization works for ECSV. Currently fails for FITS, works with # HDF5. t = self.t t.pprint_exclude_names = ['a', 'y'] t.pprint_include_names = ['b', 'z'] out = StringIO() ascii.write(t, out, format='ecsv') t2 = ascii.read(out.getvalue(), format='ecsv') assert t2.pprint_exclude_names() == ('a', 'y') assert t2.pprint_include_names() == ('b', 'z') def test_output(self): """Test that pprint_include/exclude_names actually changes the print output""" t = self.t exp = [' b d ', '--- ---', ' 2 4'] with t.pprint_exclude_names.set(['a', 'c']): out = t.pformat_all() assert out == exp with t.pprint_include_names.set(['b', 'd']): out = t.pformat_all() assert out == exp with t.pprint_exclude_names.set(['a', 'c']): out = t.pformat_all() assert out == exp with t.pprint_include_names.set(['b', 'd']): out = t.pformat_all() assert out == exp # Mixture (not common in practice but possible). Note, the trailing # backslash instead of parens is needed for Python < 3.9. See: # https://bugs.python.org/issue12782. with t.pprint_include_names.set(['b', 'c', 'd']), \ t.pprint_exclude_names.set(['c']): out = t.pformat_all() assert out == exp def test_output_globs(self): """Test that pprint_include/exclude_names works with globs (fnmatch)""" t = self.t t['a2'] = 1 t['a23'] = 2 # Show only the a* columns exp = [' a a2 a23', '--- --- ---', ' 1 1 2'] with t.pprint_include_names.set('a*'): out = t.pformat_all() assert out == exp # Show a* but exclude a?? exp = [' a a2', '--- ---', ' 1 1'] with t.pprint_include_names.set('a*'), t.pprint_exclude_names.set('a??'): out = t.pformat_all() assert out == exp # Exclude a?? exp = [' a b c d a2', '--- --- --- --- ---', ' 1 2 3 4 1'] with t.pprint_exclude_names.set('a??'): out = t.pformat_all() assert out == exp def test_embedded_newline_tab(): """Newlines and tabs are escaped in table repr""" t = Table(rows=[['a', 'b \n c \t \n d'], ['x', 'y\n']]) exp = [ r'col0 col1 ', r'---- --------------', r' a b \n c \t \n d', r' x y\n'] assert t.pformat_all() == exp
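# Added usage sketch (not in the original module): shows end-to-end the
# pprint_include_names context-manager behavior that TestColumnsShowHide
# tests piecewise, using the columns 'a'..'d' that simple_table() generates.
def test_pprint_include_names_sketch():
    t = simple_table(size=1, cols=4, kinds='i')
    with t.pprint_include_names.set(['a', 'c']):
        header = t.pformat_all()[0]
        assert 'a' in header and 'c' in header
        assert 'b' not in header and 'd' not in header
    # Outside the context manager the setting reverts to None (show all).
    assert t.pprint_include_names() is None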
# Licensed under a 3-clause BSD style license - see LICENSE.rst import itertools import pytest import numpy as np from numpy.testing import assert_allclose from astropy.convolution.utils import discretize_model from astropy.modeling.functional_models import (Box1D, Box2D, Gaussian1D, Gaussian2D, RickerWavelet1D, RickerWavelet2D) from astropy.modeling.tests.example_models import models_1D, models_2D from astropy.modeling.tests.test_models import create_model from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa modes = ['center', 'linear_interp', 'oversample'] test_models_1D = [Gaussian1D, Box1D, RickerWavelet1D] test_models_2D = [Gaussian2D, Box2D, RickerWavelet2D] @pytest.mark.parametrize(('model_class', 'mode'), list(itertools.product(test_models_1D, modes))) def test_pixel_sum_1D(model_class, mode): """ Test if the sum of all pixels corresponds nearly to the integral. """ if model_class == Box1D and mode == "center": pytest.skip("Non integrating mode. Skip integral test.") parameters = models_1D[model_class] model = create_model(model_class, parameters) values = discretize_model(model, models_1D[model_class]['x_lim'], mode=mode) assert_allclose(values.sum(), models_1D[model_class]['integral'], atol=0.0001) @pytest.mark.parametrize('mode', modes) def test_gaussian_eval_1D(mode): """ Discretize Gaussian with different modes and check if result is at least similar to Gaussian1D.eval(). """ model = Gaussian1D(1, 0, 20) x = np.arange(-100, 101) values = model(x) disc_values = discretize_model(model, (-100, 101), mode=mode) assert_allclose(values, disc_values, atol=0.001) @pytest.mark.parametrize(('model_class', 'mode'), list(itertools.product(test_models_2D, modes))) def test_pixel_sum_2D(model_class, mode): """ Test if the sum of all pixels corresponds nearly to the integral. """ if model_class == Box2D and mode == "center": pytest.skip("Non integrating mode. Skip integral test.") parameters = models_2D[model_class] model = create_model(model_class, parameters) values = discretize_model(model, models_2D[model_class]['x_lim'], models_2D[model_class]['y_lim'], mode=mode) assert_allclose(values.sum(), models_2D[model_class]['integral'], atol=0.0001) @pytest.mark.parametrize(('model_class', 'mode'), list(itertools.product(test_models_2D, modes))) def test_pixel_sum_compound_2D(model_class, mode): """ Test if the sum of all pixels of a compound model corresponds nearly to the integral. """ if model_class == Box2D and mode == "center": pytest.skip("Non integrating mode. 
Skip integral test.") parameters = models_2D[model_class] model = create_model(model_class, parameters) values = discretize_model(model + model, models_2D[model_class]['x_lim'], models_2D[model_class]['y_lim'], mode=mode) model_integral = 2 * models_2D[model_class]['integral'] assert_allclose(values.sum(), model_integral, atol=0.0001) @pytest.mark.parametrize('mode', modes) def test_gaussian_eval_2D(mode): """ Discretize Gaussian with different modes and check if result is at least similar to Gaussian2D.eval() """ model = Gaussian2D(0.01, 0, 0, 1, 1) x = np.arange(-2, 3) y = np.arange(-2, 3) x, y = np.meshgrid(x, y) values = model(x, y) disc_values = discretize_model(model, (-2, 3), (-2, 3), mode=mode) assert_allclose(values, disc_values, atol=1e-2) @pytest.mark.skipif('not HAS_SCIPY') @pytest.mark.slow def test_gaussian_eval_2D_integrate_mode(): """ Discretize Gaussian with integrate mode """ model_list = [Gaussian2D(.01, 0, 0, 2, 2), Gaussian2D(.01, 0, 0, 1, 2), Gaussian2D(.01, 0, 0, 2, 1)] x = np.arange(-2, 3) y = np.arange(-2, 3) x, y = np.meshgrid(x, y) for model in model_list: values = model(x, y) disc_values = discretize_model(model, (-2, 3), (-2, 3), mode='integrate') assert_allclose(values, disc_values, atol=1e-2) @pytest.mark.skipif('not HAS_SCIPY') def test_subpixel_gauss_1D(): """ Test subpixel accuracy of the integrate mode with gaussian 1D model. """ gauss_1D = Gaussian1D(1, 0, 0.1) values = discretize_model(gauss_1D, (-1, 2), mode='integrate', factor=100) assert_allclose(values.sum(), np.sqrt(2 * np.pi) * 0.1, atol=0.00001) @pytest.mark.skipif('not HAS_SCIPY') def test_subpixel_gauss_2D(): """ Test subpixel accuracy of the integrate mode with gaussian 2D model. """ gauss_2D = Gaussian2D(1, 0, 0, 0.1, 0.1) values = discretize_model(gauss_2D, (-1, 2), (-1, 2), mode='integrate', factor=100) assert_allclose(values.sum(), 2 * np.pi * 0.01, atol=0.00001) def test_discretize_callable_1d(): """ Test discretize when a 1d function is passed. """ def f(x): return x ** 2 y = discretize_model(f, (-5, 6)) assert_allclose(y, np.arange(-5, 6) ** 2) def test_discretize_callable_2d(): """ Test discretize when a 2d function is passed. """ def f(x, y): return x ** 2 + y ** 2 actual = discretize_model(f, (-5, 6), (-5, 6)) y, x = (np.indices((11, 11)) - 5) desired = x ** 2 + y ** 2 assert_allclose(actual, desired) def test_type_exception(): """ Test type exception. """ with pytest.raises(TypeError) as exc: discretize_model(float(0), (-10, 11)) assert exc.value.args[0] == 'Model must be callable.' def test_dim_exception_1d(): """ Test dimension exception 1d. """ def f(x): return x ** 2 with pytest.raises(ValueError) as exc: discretize_model(f, (-10, 11), (-10, 11)) assert exc.value.args[0] == "y range specified, but model is only 1-d." def test_dim_exception_2d(): """ Test dimension exception 2d. 
""" def f(x, y): return x ** 2 + y ** 2 with pytest.raises(ValueError) as exc: discretize_model(f, (-10, 11)) assert exc.value.args[0] == "y range not specified, but model is 2-d" def test_float_x_range_exception(): def f(x, y): return x ** 2 + y ** 2 with pytest.raises(ValueError) as exc: discretize_model(f, (-10.002, 11.23)) assert exc.value.args[0] == ("The difference between the upper and lower" " limit of 'x_range' must be a whole number.") def test_float_y_range_exception(): def f(x, y): return x ** 2 + y ** 2 with pytest.raises(ValueError) as exc: discretize_model(f, (-10, 11), (-10.002, 11.23)) assert exc.value.args[0] == ("The difference between the upper and lower" " limit of 'y_range' must be a whole number.") def test_discretize_oversample(): gauss_2D = Gaussian2D(amplitude=1.0, x_mean=5., y_mean=125., x_stddev=0.75, y_stddev=3) values = discretize_model(gauss_2D, x_range=[0, 10], y_range=[100, 135], mode='oversample', factor=10) vmax = np.max(values) vmax_yx = np.unravel_index(values.argmax(), values.shape) values_osf1 = discretize_model(gauss_2D, x_range=[0, 10], y_range=[100, 135], mode='oversample', factor=1) values_center = discretize_model(gauss_2D, x_range=[0, 10], y_range=[100, 135], mode = 'center') assert values.shape == (35, 10) assert_allclose(vmax, 0.927, atol=1e-3) assert vmax_yx == (25, 5) assert_allclose(values_center, values_osf1)
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Module to test statistic functions """ import numpy as np # pylint: disable=invalid-name import pytest from numpy.testing import assert_almost_equal from astropy.modeling.models import Identity, Mapping from astropy.modeling.statistic import leastsquare, leastsquare_1d, leastsquare_2d, leastsquare_3d class TestLeastSquare_XD: """Tests for leastsquare with pre-specified number of dimensions.""" @classmethod def setup_class(cls): cls.model1D = Identity(n_inputs=1) cls.model2D = Identity(n_inputs=2) | Mapping((0,), n_inputs=2) cls.model3D = Identity(n_inputs=3) | Mapping((0,), n_inputs=3) cls.data = cls.x = cls.y = cls.z = np.linspace(0, 10, num=100) cls.lsq_exp = 0 def test_1d_no_weights(self): lsq = leastsquare_1d(self.data, self.model1D, None, self.x) assert_almost_equal(lsq, self.lsq_exp) def test_1d_with_weights(self): lsq = leastsquare_1d(self.data, self.model1D, np.ones(100), self.x) assert_almost_equal(lsq, self.lsq_exp) def test_2d_no_weights(self): lsq = leastsquare_2d(self.data, self.model2D, None, self.x, self.y) assert_almost_equal(lsq, self.lsq_exp) def test_2d_with_weights(self): lsq = leastsquare_2d( self.data, self.model2D, np.ones(100), self.x, self.y ) assert_almost_equal(lsq, self.lsq_exp) def test_3d_no_weights(self): lsq = leastsquare_3d( self.data, self.model3D, None, self.x, self.y, self.z ) assert_almost_equal(lsq, self.lsq_exp) def test_3d_with_weights(self): lsq = leastsquare_3d( self.data, self.model3D, np.ones(100), self.x, self.y, self.z ) assert_almost_equal(lsq, self.lsq_exp) class TestLeastSquare_ND: """Tests for leastsquare.""" @classmethod def setup_class(cls): cls.model1D = Identity(n_inputs=1) cls.model3D = Identity(n_inputs=3) | Mapping((0,), n_inputs=3) cls.data = cls.x = cls.y = cls.z = np.linspace(0, 10, num=100) cls.lsq_exp = 0 def test_1d_no_weights(self): lsq = leastsquare(self.data, self.model1D, None, self.x) assert_almost_equal(lsq, self.lsq_exp) def test_1d_with_weights(self): lsq = leastsquare(self.data, self.model1D, np.ones(100), self.x) assert_almost_equal(lsq, self.lsq_exp) def test_3d_no_weights(self): lsq = leastsquare( self.data, self.model3D, None, self.x, self.y, self.z ) assert_almost_equal(lsq, self.lsq_exp) def test_3d_with_weights(self): lsq = leastsquare( self.data, self.model3D, np.ones(100), self.x, self.y, self.z ) assert_almost_equal(lsq, self.lsq_exp) def test_shape_mismatch(self): with pytest.raises(ValueError): leastsquare(0, self.model1D, None, self.x)
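# Added usage sketch (not in the original module): leastsquare returns the
# sum of squared residuals between the data and model(*x), optionally
# weighted. With a constant offset of 1.0 on 50 points and no weights the
# statistic is exactly 50, in contrast to the zero-residual cases above.
def test_leastsquare_constant_offset_sketch():
    x = np.linspace(0, 10, num=50)
    model = Identity(n_inputs=1)  # model(x) returns x unchanged
    data = x + 1.0                # every residual is exactly 1.0
    lsq = leastsquare(data, model, None, x)
    assert_almost_equal(lsq, 50.0)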
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test sky projections defined in WCS Paper II"""
# pylint: disable=invalid-name, no-member
import os
import unittest.mock as mk

import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_almost_equal

from astropy import units as u
from astropy import wcs
from astropy.io import fits
from astropy.modeling import projections
from astropy.modeling.parameters import InputParameterError
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.data import get_pkg_data_filename


def test_new_wcslib_projections():
    # Test that we are aware of all WCSLIB projections.
    # Detect if a new WCSLIB release introduced new projections.
    assert not set(wcs.PRJ_CODES).symmetric_difference(
        projections.projcodes + projections._NOT_SUPPORTED_PROJ_CODES
    )


def test_Projection_properties():
    projection = projections.Sky2Pix_PlateCarree()
    assert projection.n_inputs == 2
    assert projection.n_outputs == 2


PIX_COORDINATES = [-10, 30]

MAPS_DIR = os.path.join(os.pardir, os.pardir, "wcs", "tests", "data", "maps")

pars = [(x,) for x in projections.projcodes]
# There is no groundtruth file for the XPH projection available here:
# https://www.atnf.csiro.au/people/mcalabre/WCS/example_data.html
pars.remove(('XPH',))


@pytest.mark.parametrize(('code',), pars)
def test_Sky2Pix(code):
    """Check astropy model eval against wcslib eval"""
    wcs_map = os.path.join(MAPS_DIR, f"1904-66_{code}.hdr")
    test_file = get_pkg_data_filename(wcs_map)
    header = fits.Header.fromfile(test_file, endcard=False, padding=False)
    params = []
    for i in range(3):
        key = f'PV2_{i + 1}'
        if key in header:
            params.append(header[key])
    w = wcs.WCS(header)
    w.wcs.crval = [0., 0.]
    w.wcs.crpix = [0, 0]
    w.wcs.cdelt = [1, 1]
    wcslibout = w.wcs.p2s([PIX_COORDINATES], 1)
    wcs_pix = w.wcs.s2p(wcslibout['world'], 1)['pixcrd']
    model = getattr(projections, 'Sky2Pix_' + code)
    tinv = model(*params)
    x, y = tinv(wcslibout['phi'], wcslibout['theta'])
    assert_almost_equal(np.asarray(x), wcs_pix[:, 0])
    assert_almost_equal(np.asarray(y), wcs_pix[:, 1])
    assert isinstance(tinv.prjprm, wcs.Prjprm)


@pytest.mark.parametrize(('code',), pars)
def test_Pix2Sky(code):
    """Check astropy model eval against wcslib eval"""
    wcs_map = os.path.join(MAPS_DIR, f"1904-66_{code}.hdr")
    test_file = get_pkg_data_filename(wcs_map)
    header = fits.Header.fromfile(test_file, endcard=False, padding=False)
    params = []
    for i in range(3):
        key = f'PV2_{i + 1}'
        if key in header:
            params.append(header[key])
    w = wcs.WCS(header)
    w.wcs.crval = [0., 0.]
    w.wcs.crpix = [0, 0]
    w.wcs.cdelt = [1, 1]
    wcslibout = w.wcs.p2s([PIX_COORDINATES], 1)
    wcs_phi = wcslibout['phi']
    wcs_theta = wcslibout['theta']
    model = getattr(projections, 'Pix2Sky_' + code)
    tanprj = model(*params)
    phi, theta = tanprj(*PIX_COORDINATES)
    assert_almost_equal(np.asarray(phi), wcs_phi)
    assert_almost_equal(np.asarray(theta), wcs_theta)


@pytest.mark.parametrize(('code',), pars)
def test_Sky2Pix_unit(code):
    """Check astropy model eval against wcslib eval"""
    wcs_map = os.path.join(MAPS_DIR, f"1904-66_{code}.hdr")
    test_file = get_pkg_data_filename(wcs_map)
    header = fits.Header.fromfile(test_file, endcard=False, padding=False)
    params = []
    for i in range(3):
        key = f'PV2_{i + 1}'
        if key in header:
            params.append(header[key])
    w = wcs.WCS(header)
    w.wcs.crval = [0., 0.]
w.wcs.crpix = [0, 0] w.wcs.cdelt = [1, 1] wcslibout = w.wcs.p2s([PIX_COORDINATES], 1) wcs_pix = w.wcs.s2p(wcslibout['world'], 1)['pixcrd'] model = getattr(projections, 'Sky2Pix_' + code) tinv = model(*params) x, y = tinv(wcslibout['phi'] * u.deg, wcslibout['theta'] * u.deg) assert_quantity_allclose(x, wcs_pix[:, 0] * u.deg) assert_quantity_allclose(y, wcs_pix[:, 1] * u.deg) @pytest.mark.parametrize(('code',), pars) def test_Pix2Sky_unit(code): """Check astropy model eval against wcslib eval""" wcs_map = os.path.join(MAPS_DIR, f"1904-66_{code}.hdr") test_file = get_pkg_data_filename(wcs_map) header = fits.Header.fromfile(test_file, endcard=False, padding=False) params = [] for i in range(3): key = f'PV2_{i + 1}' if key in header: params.append(header[key]) w = wcs.WCS(header) w.wcs.crval = [0., 0.] w.wcs.crpix = [0, 0] w.wcs.cdelt = [1, 1] wcslibout = w.wcs.p2s([PIX_COORDINATES], 1) wcs_phi = wcslibout['phi'] wcs_theta = wcslibout['theta'] model = getattr(projections, 'Pix2Sky_' + code) tanprj = model(*params) phi, theta = tanprj(*PIX_COORDINATES * u.deg) assert_quantity_allclose(phi, wcs_phi * u.deg) assert_quantity_allclose(theta, wcs_theta * u.deg) phi, theta = tanprj(*(PIX_COORDINATES * u.deg).to(u.rad)) assert_quantity_allclose(phi, wcs_phi * u.deg) assert_quantity_allclose(theta, wcs_theta * u.deg) phi, theta = tanprj(*(PIX_COORDINATES * u.deg).to(u.arcmin)) assert_quantity_allclose(phi, wcs_phi * u.deg) assert_quantity_allclose(theta, wcs_theta * u.deg) @pytest.mark.parametrize(('code',), pars) def test_projection_default(code): """Check astropy model eval with default parameters""" # Just makes sure that the default parameter values are reasonable # and accepted by wcslib. model = getattr(projections, 'Sky2Pix_' + code) tinv = model() x, y = tinv(45, 45) model = getattr(projections, 'Pix2Sky_' + code) tinv = model() x, y = tinv(0, 0) class TestZenithalPerspective: """Test Zenithal Perspective projection""" def setup_class(self): ID = 'AZP' wcs_map = os.path.join(MAPS_DIR, f"1904-66_{ID}.hdr") test_file = get_pkg_data_filename(wcs_map) header = fits.Header.fromfile(test_file, endcard=False, padding=False) self.wazp = wcs.WCS(header) self.wazp.wcs.crpix = np.array([0., 0.]) self.wazp.wcs.crval = np.array([0., 0.]) self.wazp.wcs.cdelt = np.array([1., 1.]) self.pv_kw = [kw[2] for kw in self.wazp.wcs.get_pv()] self.azp = projections.Pix2Sky_ZenithalPerspective(*self.pv_kw) def test_AZP_p2s(self): wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1) wcs_phi = wcslibout['phi'] wcs_theta = wcslibout['theta'] phi, theta = self.azp(-10, 30) assert_almost_equal(np.asarray(phi), wcs_phi) assert_almost_equal(np.asarray(theta), wcs_theta) def test_AZP_s2p(self): wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1) wcs_pix = self.wazp.wcs.s2p(wcslibout['world'], 1)['pixcrd'] x, y = self.azp.inverse(wcslibout['phi'], wcslibout['theta']) assert_almost_equal(np.asarray(x), wcs_pix[:, 0]) assert_almost_equal(np.asarray(y), wcs_pix[:, 1]) def test_validate(self): message = "Zenithal perspective projection is not defined for mu = -1" with pytest.raises(InputParameterError) as err: projections.Pix2Sky_ZenithalPerspective(-1) assert str(err.value) == message with pytest.raises(InputParameterError) as err: projections.Sky2Pix_ZenithalPerspective(-1) assert str(err.value) == message with pytest.raises(InputParameterError) as err: projections.Pix2Sky_SlantZenithalPerspective(-1) assert str(err.value) == message with pytest.raises(InputParameterError) as err: projections.Sky2Pix_SlantZenithalPerspective(-1) assert 
str(err.value) == message class TestCylindricalPerspective: """Test cylindrical perspective projection""" def setup_class(self): ID = "CYP" wcs_map = os.path.join(MAPS_DIR, f"1904-66_{ID}.hdr") test_file = get_pkg_data_filename(wcs_map) header = fits.Header.fromfile(test_file, endcard=False, padding=False) self.wazp = wcs.WCS(header) self.wazp.wcs.crpix = np.array([0., 0.]) self.wazp.wcs.crval = np.array([0., 0.]) self.wazp.wcs.cdelt = np.array([1., 1.]) self.pv_kw = [kw[2] for kw in self.wazp.wcs.get_pv()] self.azp = projections.Pix2Sky_CylindricalPerspective(*self.pv_kw) def test_CYP_p2s(self): wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1) wcs_phi = wcslibout['phi'] wcs_theta = wcslibout['theta'] phi, theta = self.azp(-10, 30) assert_almost_equal(np.asarray(phi), wcs_phi) assert_almost_equal(np.asarray(theta), wcs_theta) def test_CYP_s2p(self): wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1) wcs_pix = self.wazp.wcs.s2p(wcslibout['world'], 1)['pixcrd'] x, y = self.azp.inverse(wcslibout['phi'], wcslibout['theta']) assert_almost_equal(np.asarray(x), wcs_pix[:, 0]) assert_almost_equal(np.asarray(y), wcs_pix[:, 1]) def test_validate(self): message0 = "CYP projection is not defined for mu = -lambda" message1 = "CYP projection is not defined for lambda = -mu" # Pix2Sky_CylindricalPerspective with pytest.raises(InputParameterError) as err: projections.Pix2Sky_CylindricalPerspective(1, -1) assert str(err.value) == message0 or str(err.value) == message1 with pytest.raises(InputParameterError) as err: projections.Pix2Sky_CylindricalPerspective(-1, 1) assert str(err.value) == message0 or str(err.value) == message1 model = projections.Pix2Sky_CylindricalPerspective() with pytest.raises(InputParameterError) as err: model.mu = -1 assert str(err.value) == message0 with pytest.raises(InputParameterError) as err: model.lam = -1 assert str(err.value) == message1 # Sky2Pix_CylindricalPerspective with pytest.raises(InputParameterError) as err: projections.Sky2Pix_CylindricalPerspective(1, -1) assert str(err.value) == message0 or str(err.value) == message1 with pytest.raises(InputParameterError) as err: projections.Sky2Pix_CylindricalPerspective(-1, 1) assert str(err.value) == message0 or str(err.value) == message1 model = projections.Sky2Pix_CylindricalPerspective() with pytest.raises(InputParameterError) as err: model.mu = -1 assert str(err.value) == message0 with pytest.raises(InputParameterError) as err: model.lam = -1 assert str(err.value) == message1 def test_AffineTransformation2D(): # Simple test with a scale and translation model = projections.AffineTransformation2D( matrix=[[2, 0], [0, 2]], translation=[1, 1]) # Coordinates for vertices of a rectangle rect = [[0, 0], [1, 0], [0, 3], [1, 3]] x, y = zip(*rect) new_rect = np.vstack(model(x, y)).T assert np.all(new_rect == [[1, 1], [3, 1], [1, 7], [3, 7]]) # Matrix validation error with pytest.raises(InputParameterError) as err: model.matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] assert str(err.value) == "Expected transformation matrix to be a 2x2 array" # Translation validation error with pytest.raises(InputParameterError) as err: model.translation = [1, 2, 3] assert str(err.value) == ("Expected translation vector to be a " "2 element row or column vector array") with pytest.raises(InputParameterError) as err: model.translation = [[1], [2]] assert str(err.value) == ("Expected translation vector to be a " "2 element row or column vector array") with pytest.raises(InputParameterError) as err: model.translation = [[1, 2, 3]] assert str(err.value) == ("Expected 
translation vector to be a " "2 element row or column vector array") # Incompatible shape error a = np.array([[1], [2], [3], [4]]) b = a.ravel() with mk.patch.object(np, 'vstack', autospec=True, side_effect=[a, b]) as mk_vstack: message = "Incompatible input shapes" with pytest.raises(ValueError) as err: model(x, y) assert str(err.value) == message with pytest.raises(ValueError) as err: model(x, y) assert str(err.value) == message assert mk_vstack.call_count == 2 # Input shape evaluation error x = np.array([1, 2]) y = np.array([1, 2, 3]) with pytest.raises(ValueError) as err: model.evaluate(x, y, model.matrix, model.translation) assert str(err.value) == "Expected input arrays to have the same shape" def test_AffineTransformation2D_inverse(): # Test non-invertible model model1 = projections.AffineTransformation2D( matrix=[[1, 1], [1, 1]]) with pytest.raises(InputParameterError): model1.inverse model2 = projections.AffineTransformation2D( matrix=[[1.2, 3.4], [5.6, 7.8]], translation=[9.1, 10.11]) # Coordinates for vertices of a rectangle rect = [[0, 0], [1, 0], [0, 3], [1, 3]] x, y = zip(*rect) x_new, y_new = model2.inverse(*model2(x, y)) assert_allclose([x, y], [x_new, y_new], atol=1e-10) model3 = projections.AffineTransformation2D( matrix=[[1.2, 3.4], [5.6, 7.8]] * u.m, translation=[9.1, 10.11] * u.m) x_new, y_new = model3.inverse(*model3(x * u.m, y * u.m)) assert_allclose([x, y], [x_new, y_new], atol=1e-10) model4 = projections.AffineTransformation2D( matrix=[[1.2, 3.4], [5.6, 7.8]] * u.m, translation=[9.1, 10.11] * u.km) with pytest.raises(ValueError) as err: model4.inverse(*model4(x * u.m, y * u.m)) assert str(err.value) == "matrix and translation must have the same units." def test_c_projection_striding(): # This is just a simple test to make sure that the striding is # handled correctly in the projection C extension coords = np.arange(10).reshape((5, 2)) model = projections.Sky2Pix_ZenithalPerspective(2, 30) phi, theta = model(coords[:, 0], coords[:, 1]) assert_almost_equal( phi, [0., 2.2790416, 4.4889294, 6.6250643, 8.68301]) assert_almost_equal( theta, [-76.4816918, -75.3594654, -74.1256332, -72.784558, -71.3406629]) def test_c_projections_shaped(): nx, ny = (5, 2) x = np.linspace(0, 1, nx) y = np.linspace(0, 1, ny) xv, yv = np.meshgrid(x, y) model = projections.Pix2Sky_TAN() phi, theta = model(xv, yv) assert_allclose( phi, [[0., 90., 90., 90., 90.], [180., 165.96375653, 153.43494882, 143.13010235, 135.]]) assert_allclose( theta, [[90., 89.75000159, 89.50001269, 89.25004283, 89.00010152], [89.00010152, 88.96933478, 88.88210788, 88.75019826, 88.58607353]]) def test_affine_with_quantities(): x = 1 y = 2 xdeg = (x * u.pix).to(u.deg, equivalencies=u.pixel_scale(2.5 * u.deg / u.pix)) ydeg = (y * u.pix).to(u.deg, equivalencies=u.pixel_scale(2.5 * u.deg / u.pix)) xpix = x * u.pix ypix = y * u.pix # test affine with matrix only qaff = projections.AffineTransformation2D(matrix=[[1, 2], [2, 1]] * u.deg) with pytest.raises(ValueError): qx1, qy1 = qaff(xpix, ypix, equivalencies={ 'x': u.pixel_scale(2.5 * u.deg / u.pix), 'y': u.pixel_scale(2.5 * u.deg / u.pix)}) # test affine with matrix and translation qaff = projections.AffineTransformation2D(matrix=[[1, 2], [2, 1]] * u.deg, translation=[1, 2] * u.deg) qx1, qy1 = qaff(xpix, ypix, equivalencies={ 'x': u.pixel_scale(2.5 * u.deg / u.pix), 'y': u.pixel_scale(2.5 * u.deg / u.pix)}) aff = projections.AffineTransformation2D(matrix=[[1, 2], [2, 1]], translation=[1, 2]) x1, y1 = aff(xdeg.value, ydeg.value) assert_quantity_allclose(qx1, x1 * u.deg) 

def test_affine_with_quantities():
    x = 1
    y = 2
    xdeg = (x * u.pix).to(u.deg,
                          equivalencies=u.pixel_scale(2.5 * u.deg / u.pix))
    ydeg = (y * u.pix).to(u.deg,
                          equivalencies=u.pixel_scale(2.5 * u.deg / u.pix))
    xpix = x * u.pix
    ypix = y * u.pix

    # test affine with matrix only
    qaff = projections.AffineTransformation2D(matrix=[[1, 2], [2, 1]] * u.deg)
    with pytest.raises(ValueError):
        qx1, qy1 = qaff(xpix, ypix, equivalencies={
            'x': u.pixel_scale(2.5 * u.deg / u.pix),
            'y': u.pixel_scale(2.5 * u.deg / u.pix)})

    # test affine with matrix and translation
    qaff = projections.AffineTransformation2D(matrix=[[1, 2], [2, 1]] * u.deg,
                                              translation=[1, 2] * u.deg)
    qx1, qy1 = qaff(xpix, ypix, equivalencies={
        'x': u.pixel_scale(2.5 * u.deg / u.pix),
        'y': u.pixel_scale(2.5 * u.deg / u.pix)})
    aff = projections.AffineTransformation2D(matrix=[[1, 2], [2, 1]],
                                             translation=[1, 2])
    x1, y1 = aff(xdeg.value, ydeg.value)
    assert_quantity_allclose(qx1, x1 * u.deg)
    assert_quantity_allclose(qy1, y1 * u.deg)

    # test the case of WCS PC and CDELT transformations
    pc = np.array([[0.86585778922708, 0.50029020461607],
                   [-0.50029020461607, 0.86585778922708]])
    cdelt = np.array([[1, 3.0683055555556E-05], [3.0966944444444E-05, 1]])
    matrix = cdelt * pc
    qaff = projections.AffineTransformation2D(matrix=matrix * u.deg,
                                              translation=[0, 0] * u.deg)

    inv_matrix = np.linalg.inv(matrix)
    inv_qaff = projections.AffineTransformation2D(matrix=inv_matrix * u.pix,
                                                  translation=[0, 0] * u.pix)
    qaff.inverse = inv_qaff
    qx1, qy1 = qaff(xpix, ypix, equivalencies={
        'x': u.pixel_scale(1 * u.deg / u.pix),
        'y': u.pixel_scale(1 * u.deg / u.pix)})
    x1, y1 = qaff.inverse(qx1, qy1, equivalencies={
        'x': u.pixel_scale(1 * u.deg / u.pix),
        'y': u.pixel_scale(1 * u.deg / u.pix)})
    assert_quantity_allclose(x1, xpix)
    assert_quantity_allclose(y1, ypix)
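
# Every test from here to the end of the module checks the same round-trip
# invariant (in one or both directions): composing a projection with its
# ``.inverse`` reproduces the inputs to within ``atol``.  The sketch below
# expresses that shared pattern once; it is illustrative only (the helper
# name ``_check_roundtrip`` is ours, and the suite intentionally keeps one
# explicit test per projection so that failures report a specific model).
def _check_roundtrip(model, atol=1e-12):
    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)
    # model composed with its inverse ...
    a, b = model(*model.inverse(x, y))
    assert_allclose(a, x, atol=atol)
    assert_allclose(b, y, atol=atol)
    # ... and the inverse composed with the model
    a, b = model.inverse(*model(x, y))
    assert_allclose(a, x, atol=atol)
    assert_allclose(b, y, atol=atol)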

def test_Pix2Sky_ZenithalPerspective_inverse():
    model = projections.Pix2Sky_ZenithalPerspective(2, 30)
    inverse = model.inverse

    assert isinstance(inverse, projections.Sky2Pix_ZenithalPerspective)
    assert inverse.mu == model.mu == 2
    assert_allclose(inverse.gamma, model.gamma)
    assert_allclose(inverse.gamma, 30)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Sky2Pix_ZenithalPerspective_inverse():
    model = projections.Sky2Pix_ZenithalPerspective(2, 30)
    inverse = model.inverse

    assert isinstance(inverse, projections.Pix2Sky_AZP)
    assert inverse.mu == model.mu == 2
    assert_allclose(inverse.gamma, model.gamma)
    assert_allclose(inverse.gamma, 30)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Pix2Sky_SlantZenithalPerspective_inverse():
    model = projections.Pix2Sky_SlantZenithalPerspective(2, 30, 40)
    inverse = model.inverse

    assert isinstance(inverse, projections.Sky2Pix_SlantZenithalPerspective)
    assert inverse.mu == model.mu == 2
    assert_allclose(inverse.phi0, model.phi0)
    assert_allclose(inverse.theta0, model.theta0)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Sky2Pix_SlantZenithalPerspective_inverse():
    model = projections.Sky2Pix_SlantZenithalPerspective(2, 30, 40)
    inverse = model.inverse

    assert isinstance(inverse, projections.Pix2Sky_SlantZenithalPerspective)
    assert inverse.mu == model.mu == 2
    assert_allclose(inverse.phi0, model.phi0)
    assert_allclose(inverse.theta0, model.theta0)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Pix2Sky_Gnomonic_inverse():
    model = projections.Pix2Sky_Gnomonic()
    inverse = model.inverse

    assert isinstance(inverse, projections.Sky2Pix_Gnomonic)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Sky2Pix_Gnomonic_inverse():
    model = projections.Sky2Pix_Gnomonic()
    inverse = model.inverse

    assert isinstance(inverse, projections.Pix2Sky_Gnomonic)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Pix2Sky_Stereographic_inverse():
    model = projections.Pix2Sky_Stereographic()
    inverse = model.inverse

    assert isinstance(inverse, projections.Sky2Pix_Stereographic)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Sky2Pix_Stereographic_inverse():
    model = projections.Sky2Pix_Stereographic()
    inverse = model.inverse

    assert isinstance(inverse, projections.Pix2Sky_Stereographic)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Pix2Sky_SlantOrthographic_inverse():
    model = projections.Pix2Sky_SlantOrthographic(2, 30)
    inverse = model.inverse

    assert isinstance(inverse, projections.Sky2Pix_SlantOrthographic)
    assert inverse.xi == model.xi == 2
    assert inverse.eta == model.eta == 30

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-8)
    assert_allclose(b, y, atol=1e-8)


def test_Sky2Pix_SlantOrthographic_inverse():
    model = projections.Sky2Pix_SlantOrthographic(2, 30)
    inverse = model.inverse

    assert isinstance(inverse, projections.Pix2Sky_SlantOrthographic)
    assert inverse.xi == model.xi == 2
    assert inverse.eta == model.eta == 30

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-8)
    assert_allclose(b, y, atol=1e-8)


def test_Pix2Sky_ZenithalEquidistant_inverse():
    model = projections.Pix2Sky_ZenithalEquidistant()
    inverse = model.inverse

    assert isinstance(inverse, projections.Sky2Pix_ZenithalEquidistant)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Sky2Pix_ZenithalEquidistant_inverse():
    model = projections.Sky2Pix_ZenithalEquidistant()
    inverse = model.inverse

    assert isinstance(inverse, projections.Pix2Sky_ZenithalEquidistant)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Pix2Sky_ZenithalEqualArea_inverse():
    model = projections.Pix2Sky_ZenithalEqualArea()
    inverse = model.inverse

    assert isinstance(inverse, projections.Sky2Pix_ZenithalEqualArea)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Sky2Pix_ZenithalEqualArea_inverse():
    model = projections.Sky2Pix_ZenithalEqualArea()
    inverse = model.inverse

    assert isinstance(inverse, projections.Pix2Sky_ZenithalEqualArea)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)

def test_Pix2Sky_Airy_inverse():
    model = projections.Pix2Sky_Airy(30)
    inverse = model.inverse

    assert isinstance(inverse, projections.Sky2Pix_Airy)
    assert inverse.theta_b == model.theta_b == 30

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Sky2Pix_Airy_inverse():
    model = projections.Sky2Pix_Airy(30)
    inverse = model.inverse

    assert isinstance(inverse, projections.Pix2Sky_Airy)
    assert inverse.theta_b == model.theta_b == 30

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Pix2Sky_CylindricalPerspective_inverse():
    model = projections.Pix2Sky_CylindricalPerspective(2, 30)
    inverse = model.inverse

    assert isinstance(inverse, projections.Sky2Pix_CylindricalPerspective)
    assert inverse.mu == model.mu == 2
    assert inverse.lam == model.lam == 30

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Sky2Pix_CylindricalPerspective_inverse():
    model = projections.Sky2Pix_CylindricalPerspective(2, 30)
    inverse = model.inverse

    assert isinstance(inverse, projections.Pix2Sky_CylindricalPerspective)
    assert inverse.mu == model.mu == 2
    assert inverse.lam == model.lam == 30

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Pix2Sky_CylindricalEqualArea_inverse():
    model = projections.Pix2Sky_CylindricalEqualArea(0.567)
    inverse = model.inverse

    assert isinstance(inverse, projections.Sky2Pix_CylindricalEqualArea)
    assert inverse.lam == model.lam == 0.567


def test_Sky2Pix_CylindricalEqualArea_inverse():
    model = projections.Sky2Pix_CylindricalEqualArea(0.765)
    inverse = model.inverse

    assert isinstance(inverse, projections.Pix2Sky_CylindricalEqualArea)
    assert inverse.lam == model.lam == 0.765


def test_Pix2Sky_PlateCarree_inverse():
    model = projections.Pix2Sky_PlateCarree()
    inverse = model.inverse

    assert isinstance(inverse, projections.Sky2Pix_PlateCarree)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Sky2Pix_PlateCarree_inverse():
    model = projections.Sky2Pix_PlateCarree()
    inverse = model.inverse

    assert isinstance(inverse, projections.Pix2Sky_PlateCarree)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
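
# Side note (an assumption, not verified here): astropy.modeling.projections
# also exposes three-letter FITS aliases for these classes -- Pix2Sky_AZP is
# used that way above, and the same pattern would give Pix2Sky_CAR /
# Pix2Sky_MER for the plate carree and Mercator models.  If those aliases
# exist, the parameterless round trips could be driven from FITS codes with
# the illustrative helper sketched earlier:
#
#     for code in ("CAR", "MER", "SFL", "PAR", "MOL", "AIT"):
#         _check_roundtrip(getattr(projections, f"Pix2Sky_{code}")())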

def test_Pix2Sky_Mercator_inverse():
    model = projections.Pix2Sky_Mercator()
    inverse = model.inverse

    assert isinstance(inverse, projections.Sky2Pix_Mercator)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Sky2Pix_Mercator_inverse():
    model = projections.Sky2Pix_Mercator()
    inverse = model.inverse

    assert isinstance(inverse, projections.Pix2Sky_Mercator)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Pix2Sky_SansonFlamsteed_inverse():
    model = projections.Pix2Sky_SansonFlamsteed()
    inverse = model.inverse

    assert isinstance(inverse, projections.Sky2Pix_SansonFlamsteed)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Sky2Pix_SansonFlamsteed_inverse():
    model = projections.Sky2Pix_SansonFlamsteed()
    inverse = model.inverse

    assert isinstance(inverse, projections.Pix2Sky_SansonFlamsteed)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Pix2Sky_Parabolic_inverse():
    model = projections.Pix2Sky_Parabolic()
    inverse = model.inverse

    assert isinstance(inverse, projections.Sky2Pix_Parabolic)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Sky2Pix_Parabolic_inverse():
    model = projections.Sky2Pix_Parabolic()
    inverse = model.inverse

    assert isinstance(inverse, projections.Pix2Sky_Parabolic)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Pix2Sky_Molleweide_inverse():
    model = projections.Pix2Sky_Molleweide()
    inverse = model.inverse

    assert isinstance(inverse, projections.Sky2Pix_Molleweide)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Sky2Pix_Molleweide_inverse():
    model = projections.Sky2Pix_Molleweide()
    inverse = model.inverse

    assert isinstance(inverse, projections.Pix2Sky_Molleweide)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Pix2Sky_HammerAitoff_inverse():
    model = projections.Pix2Sky_HammerAitoff()
    inverse = model.inverse

    assert isinstance(inverse, projections.Sky2Pix_HammerAitoff)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)

def test_Sky2Pix_HammerAitoff_inverse():
    model = projections.Sky2Pix_HammerAitoff()
    inverse = model.inverse

    assert isinstance(inverse, projections.Pix2Sky_HammerAitoff)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Pix2Sky_ConicPerspective_inverse():
    model = projections.Pix2Sky_ConicPerspective(2, 30)
    inverse = model.inverse

    assert isinstance(inverse, projections.Sky2Pix_ConicPerspective)
    assert inverse.sigma == model.sigma == 2
    assert_allclose(inverse.delta, model.delta)
    assert_allclose(inverse.delta, 30)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Sky2Pix_ConicPerspective_inverse():
    model = projections.Sky2Pix_ConicPerspective(2, 30)
    inverse = model.inverse

    assert isinstance(inverse, projections.Pix2Sky_ConicPerspective)
    assert inverse.sigma == model.sigma == 2
    assert_allclose(inverse.delta, model.delta)
    assert_allclose(inverse.delta, 30)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Pix2Sky_ConicEqualArea_inverse():
    model = projections.Pix2Sky_ConicEqualArea(2, 30)
    inverse = model.inverse

    assert isinstance(inverse, projections.Sky2Pix_ConicEqualArea)
    assert inverse.sigma == model.sigma == 2
    assert_allclose(inverse.delta, model.delta)
    assert_allclose(inverse.delta, 30)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Sky2Pix_ConicEqualArea_inverse():
    model = projections.Sky2Pix_ConicEqualArea(2, 30)
    inverse = model.inverse

    assert isinstance(inverse, projections.Pix2Sky_ConicEqualArea)
    assert inverse.sigma == model.sigma == 2
    assert_allclose(inverse.delta, model.delta)
    assert_allclose(inverse.delta, 30)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Pix2Sky_ConicEquidistant_inverse():
    model = projections.Pix2Sky_ConicEquidistant(2, 30)
    inverse = model.inverse

    assert isinstance(inverse, projections.Sky2Pix_ConicEquidistant)
    assert inverse.sigma == model.sigma == 2
    assert_allclose(inverse.delta, model.delta)
    assert_allclose(inverse.delta, 30)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Sky2Pix_ConicEquidistant_inverse():
    model = projections.Sky2Pix_ConicEquidistant(2, 30)
    inverse = model.inverse

    assert isinstance(inverse, projections.Pix2Sky_ConicEquidistant)
    assert inverse.sigma == model.sigma == 2
    assert_allclose(inverse.delta, model.delta)
    assert_allclose(inverse.delta, 30)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)

def test_Pix2Sky_ConicOrthomorphic_inverse():
    model = projections.Pix2Sky_ConicOrthomorphic(2, 30)
    inverse = model.inverse

    assert isinstance(inverse, projections.Sky2Pix_ConicOrthomorphic)
    assert inverse.sigma == model.sigma == 2
    assert_allclose(inverse.delta, model.delta)
    assert_allclose(inverse.delta, 30)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Sky2Pix_ConicOrthomorphic_inverse():
    model = projections.Sky2Pix_ConicOrthomorphic(2, 30)
    inverse = model.inverse

    assert isinstance(inverse, projections.Pix2Sky_ConicOrthomorphic)
    assert inverse.sigma == model.sigma == 2
    assert_allclose(inverse.delta, model.delta)
    assert_allclose(inverse.delta, 30)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Pix2Sky_BonneEqualArea_inverse():
    model = projections.Pix2Sky_BonneEqualArea(2)
    inverse = model.inverse

    assert isinstance(inverse, projections.Sky2Pix_BonneEqualArea)
    assert inverse.theta1 == model.theta1 == 2

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Sky2Pix_BonneEqualArea_inverse():
    model = projections.Sky2Pix_BonneEqualArea(2)
    inverse = model.inverse

    assert isinstance(inverse, projections.Pix2Sky_BonneEqualArea)
    assert inverse.theta1 == model.theta1 == 2

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Pix2Sky_Polyconic_inverse():
    model = projections.Pix2Sky_Polyconic()
    inverse = model.inverse

    assert isinstance(inverse, projections.Sky2Pix_Polyconic)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Sky2Pix_Polyconic_inverse():
    model = projections.Sky2Pix_Polyconic()
    inverse = model.inverse

    assert isinstance(inverse, projections.Pix2Sky_Polyconic)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Pix2Sky_TangentialSphericalCube_inverse():
    model = projections.Pix2Sky_TangentialSphericalCube()
    inverse = model.inverse

    assert isinstance(inverse, projections.Sky2Pix_TangentialSphericalCube)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Sky2Pix_TangentialSphericalCube_inverse():
    model = projections.Sky2Pix_TangentialSphericalCube()
    inverse = model.inverse

    assert isinstance(inverse, projections.Pix2Sky_TangentialSphericalCube)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
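
# The COBE quadrilateralized spherical cube (CSC) round trips below use a
# much looser tolerance (1e-3 rather than 1e-12); the CSC projection is
# defined through polynomial approximations (see Calabretta & Greisen 2002),
# so the forward and inverse transforms do not compose exactly.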

def test_Pix2Sky_COBEQuadSphericalCube_inverse():
    model = projections.Pix2Sky_COBEQuadSphericalCube()
    inverse = model.inverse

    assert isinstance(inverse, projections.Sky2Pix_COBEQuadSphericalCube)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-3)
    assert_allclose(b, y, atol=1e-3)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-3)
    assert_allclose(b, y, atol=1e-3)


def test_Sky2Pix_COBEQuadSphericalCube_inverse():
    model = projections.Sky2Pix_COBEQuadSphericalCube()
    inverse = model.inverse

    assert isinstance(inverse, projections.Pix2Sky_COBEQuadSphericalCube)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-3)
    assert_allclose(b, y, atol=1e-3)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-3)
    assert_allclose(b, y, atol=1e-3)


def test_Pix2Sky_QuadSphericalCube_inverse():
    model = projections.Pix2Sky_QuadSphericalCube()
    inverse = model.inverse

    assert isinstance(inverse, projections.Sky2Pix_QuadSphericalCube)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Sky2Pix_QuadSphericalCube_inverse():
    model = projections.Sky2Pix_QuadSphericalCube()
    inverse = model.inverse

    assert isinstance(inverse, projections.Pix2Sky_QuadSphericalCube)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Pix2Sky_HEALPix_inverse():
    model = projections.Pix2Sky_HEALPix(2, 30)
    inverse = model.inverse

    assert isinstance(inverse, projections.Sky2Pix_HEALPix)
    assert inverse.H == model.H == 2
    assert inverse.X == model.X == 30

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Sky2Pix_HEALPix_inverse():
    model = projections.Sky2Pix_HEALPix(2, 30)
    inverse = model.inverse

    assert isinstance(inverse, projections.Pix2Sky_HEALPix)
    assert inverse.H == model.H == 2
    assert inverse.X == model.X == 30

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)


def test_Pix2Sky_HEALPixPolar_inverse():
    model = projections.Pix2Sky_HEALPixPolar()
    inverse = model.inverse

    assert isinstance(inverse, projections.Sky2Pix_HEALPixPolar)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)

def test_Sky2Pix_HEALPixPolar_inverse():
    model = projections.Sky2Pix_HEALPixPolar()
    inverse = model.inverse

    assert isinstance(inverse, projections.Pix2Sky_HEALPixPolar)

    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)

    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
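
# Closing note (hedged): the HEALPix tests above pass H=2, X=30 simply to
# exercise the parameter plumbing; the canonical HEALPix grid corresponds
# to H=4, X=3.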