# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astronomical and physics constants in cgs units.  See :mod:`astropy.constants`
for a complete listing of constants defined in Astropy.
"""

import itertools

from .constant import Constant
from . import codata2014, iau2015

for _nm, _c in itertools.chain(sorted(vars(codata2014).items()),
                               sorted(vars(iau2015).items())):
    if (isinstance(_c, Constant) and _c.abbrev not in locals()
            and _c.system in ['esu', 'gauss', 'emu']):
        locals()[_c.abbrev] = _c
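Since electromagnetic constants carry separate esu/gauss/emu variants, the attributes this loop injects are normally reached through the parent constant rather than imported directly. A small usage sketch:

# --- usage sketch (not part of the module) ---
from astropy import constants as const

print(const.e.gauss)   # electron charge in the Gaussian cgs sub-system
print(const.e.esu)     # the same constant in electrostatic units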
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import functools
import types
import warnings

import numpy as np

from ..units.core import Unit, UnitsError
from ..units.quantity import Quantity
from ..utils import lazyproperty
from ..utils.exceptions import AstropyUserWarning
from ..utils.misc import InheritDocstrings

__all__ = ['Constant', 'EMConstant']


class ConstantMeta(InheritDocstrings):
    """Metaclass for `Constant`.  The primary purpose of this is to wrap the
    double-underscore methods of `Quantity`, which is the superclass of
    `Constant`.

    In particular this wraps the operator overloads such as `__add__` to
    prevent constants such as ``e`` from being used in expressions without
    specifying a system.  The wrapper checks to see if the constant is listed
    (by name) in ``Constant._has_incompatible_units``, the set of constants
    whose definitions in different systems of units are not physically
    compatible with each other.  It also performs this check on each
    `Constant` if it hasn't already been performed (the check is deferred
    until the `Constant` is actually used in an expression to speed up
    import times, among other reasons).
    """

    def __new__(mcls, name, bases, d):
        def wrap(meth):
            @functools.wraps(meth)
            def wrapper(self, *args, **kwargs):
                name_lower = self.name.lower()
                instances = self._registry[name_lower]
                if not self._checked_units:
                    for inst in instances.values():
                        try:
                            self.unit.to(inst.unit)
                        except UnitsError:
                            self._has_incompatible_units.add(name_lower)
                    self._checked_units = True

                if (not self.system and
                        name_lower in self._has_incompatible_units):
                    systems = sorted([x for x in instances if x])
                    raise TypeError(
                        'Constant {0!r} does not have physically compatible '
                        'units across all systems of units and cannot be '
                        'combined with other values without specifying a '
                        'system (eg. {1}.{2})'.format(self.abbrev,
                                                      self.abbrev,
                                                      systems[0]))

                return meth(self, *args, **kwargs)

            return wrapper

        # The wrapper applies to so many of the __ methods that it's easier
        # to just exclude the ones it doesn't apply to
        exclude = set(['__new__', '__array_finalize__', '__array_wrap__',
                       '__dir__', '__getattr__', '__init__', '__str__',
                       '__repr__', '__hash__', '__iter__', '__getitem__',
                       '__len__', '__bool__', '__quantity_subclass__'])
        for attr, value in vars(Quantity).items():
            if (isinstance(value, types.FunctionType) and
                    attr.startswith('__') and attr.endswith('__') and
                    attr not in exclude):
                d[attr] = wrap(value)

        return super().__new__(mcls, name, bases, d)
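The practical effect of this wrapping: a constant such as ``e``, which is defined with physically incompatible units in the SI and cgs sub-systems, cannot participate in arithmetic until a system is chosen. A brief illustration of the behaviour the metaclass produces:

# --- usage sketch (not part of the module) ---
from astropy import constants as const

try:
    const.e * 2          # ambiguous: which system's electron charge?
except TypeError as err:
    print(err)

print(const.e.si * 2)    # unambiguous once a system is picked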
class Constant(Quantity, metaclass=ConstantMeta):
    """A physical or astronomical constant.

    These objects are quantities that are meant to represent physical
    constants.
    """

    _registry = {}
    _has_incompatible_units = set()

    def __new__(cls, abbrev, name, value, unit, uncertainty,
                reference=None, system=None):
        if reference is None:
            reference = getattr(cls, 'default_reference', None)
            if reference is None:
                raise TypeError("{} requires a reference.".format(cls))
        name_lower = name.lower()
        instances = cls._registry.setdefault(name_lower, {})
        # By-pass Quantity initialization, since units may not yet be
        # initialized here, and we store the unit in string form.
        inst = np.array(value).view(cls)

        if system in instances:
            warnings.warn('Constant {0!r} already has a definition in the '
                          '{1!r} system from {2!r} reference'.format(
                              name, system, reference), AstropyUserWarning)
        for c in instances.values():
            if system is not None and not hasattr(c.__class__, system):
                setattr(c, system, inst)
            if c.system is not None and not hasattr(inst.__class__, c.system):
                setattr(inst, c.system, c)

        instances[system] = inst

        inst._abbrev = abbrev
        inst._name = name
        inst._value = value
        inst._unit_string = unit
        inst._uncertainty = uncertainty
        inst._reference = reference
        inst._system = system

        inst._checked_units = False
        return inst

    def __repr__(self):
        return ('<{0} name={1!r} value={2} uncertainty={3} unit={4!r} '
                'reference={5!r}>'.format(self.__class__, self.name,
                                          self.value, self.uncertainty,
                                          str(self.unit), self.reference))

    def __str__(self):
        return ('  Name   = {0}\n'
                '  Value  = {1}\n'
                '  Uncertainty  = {2}\n'
                '  Unit  = {3}\n'
                '  Reference = {4}'.format(self.name, self.value,
                                           self.uncertainty, self.unit,
                                           self.reference))

    def __quantity_subclass__(self, unit):
        return super().__quantity_subclass__(unit)[0], False

    def copy(self):
        """
        Return a copy of this `Constant` instance.  Since they are by
        definition immutable, this merely returns another reference to
        ``self``.
        """
        return self

    __deepcopy__ = __copy__ = copy

    @property
    def abbrev(self):
        """A typical ASCII text abbreviation of the constant, also generally
        the same as the Python variable used for this constant.
        """
        return self._abbrev

    @property
    def name(self):
        """The full name of the constant."""
        return self._name

    @lazyproperty
    def _unit(self):
        """The unit(s) in which this constant is defined."""
        return Unit(self._unit_string)

    @property
    def uncertainty(self):
        """The known uncertainty in this constant's value."""
        return self._uncertainty

    @property
    def reference(self):
        """The source used for the value of this constant."""
        return self._reference

    @property
    def system(self):
        """The system of units in which this constant is defined (typically
        `None` so long as the constant's units can be directly converted
        between systems).
        """
        return self._system

    def _instance_or_super(self, key):
        instances = self._registry[self.name.lower()]
        inst = instances.get(key)
        if inst is not None:
            return inst
        else:
            return getattr(super(), key)

    @property
    def si(self):
        """If the Constant is defined in the SI system return that instance
        of the constant, else convert to a Quantity in the appropriate SI
        units.
        """
        return self._instance_or_super('si')

    @property
    def cgs(self):
        """If the Constant is defined in the CGS system return that instance
        of the constant, else convert to a Quantity in the appropriate CGS
        units.
        """
        return self._instance_or_super('cgs')

    def __array_finalize__(self, obj):
        for attr in ('_abbrev', '_name', '_value', '_unit_string',
                     '_uncertainty', '_reference', '_system'):
            setattr(self, attr, getattr(obj, attr, None))

        self._checked_units = getattr(obj, '_checked_units', False)


class EMConstant(Constant):
    """An electromagnetic constant."""

    @property
    def cgs(self):
        """Overridden for EMConstant to raise a `TypeError`
        emphasizing that there are multiple EM extensions to CGS.
        """
        raise TypeError("Cannot convert EM constants to cgs because there "
                        "are different systems for EM constants within the "
                        "CGS system (ESU, Gaussian, etc.). Instead, "
                        "directly use the constant with the appropriate "
                        "suffix (e.g. e.esu, e.gauss, etc.).")
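A constant built by hand goes through the same ``__new__`` path as the bundled tables. Every value below is made up purely for illustration:

# --- usage sketch (not part of the module) ---
from astropy.constants import Constant

example = Constant(abbrev='X0', name='Example constant', value=1.23e-4,
                   unit='m / s', uncertainty=4.5e-9,
                   reference='this document', system='si')
print(example)                                  # formatted via __str__
print(example.uncertainty, example.reference)   # metadata properties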
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astronomical and physics constants in SI units.  See :mod:`astropy.constants`
for a complete listing of constants defined in Astropy.
"""

import itertools

from .constant import Constant
from . import codata2014, iau2015

for _nm, _c in itertools.chain(sorted(vars(codata2014).items()),
                               sorted(vars(iau2015).items())):
    if (isinstance(_c, Constant) and _c.abbrev not in locals()
            and _c.system == 'si'):
        locals()[_c.abbrev] = _c
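The same injection pattern populates this module with the SI instances, so the CODATA and IAU values become plain module attributes:

# --- usage sketch (not part of the module) ---
from astropy.constants import si

print(si.c)                  # speed of light (CODATA 2014, SI)
print(si.G, si.G.reference)  # gravitational constant and its source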
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains dictionaries with sets of parameters for a
given cosmology.

Each cosmology has the following parameters defined:

    ==========  =====================================
    Oc0         Omega cold dark matter at z=0
    Ob0         Omega baryon at z=0
    Om0         Omega matter at z=0
    flat        Is this assumed flat?  If not, Ode0 must be specified
    Ode0        Omega dark energy at z=0 if flat is False
    H0          Hubble parameter at z=0 in km/s/Mpc
    n           Density perturbation spectral index
    Tcmb0       Current temperature of the CMB
    Neff        Effective number of neutrino species
    sigma8      Density perturbation amplitude
    tau         Ionisation optical depth
    z_reion     Redshift of hydrogen reionisation
    t0          Age of the universe in Gyr
    reference   Reference for the parameters
    ==========  =====================================

The list of available cosmologies is given by `available`.
Current cosmologies available:

Planck 2015 (Planck15) parameters from Planck Collaboration 2016, A&A, 594,
A13 (Paper XIII), Table 4 (TT, TE, EE + lowP + lensing + ext)

Planck 2013 (Planck13) parameters from Planck Collaboration 2014, A&A, 571,
A16 (Paper XVI), Table 5 (Planck + WP + highL + BAO)

WMAP 9 year (WMAP9) parameters from Hinshaw et al. 2013, ApJS, 208, 19,
doi: 10.1088/0067-0049/208/2/19. Table 4 (WMAP9 + eCMB + BAO + H0)

WMAP 7 year (WMAP7) parameters from Komatsu et al. 2011, ApJS, 192, 18,
doi: 10.1088/0067-0049/192/2/18. Table 1 (WMAP + BAO + H0 ML).

WMAP 5 year (WMAP5) parameters from Komatsu et al. 2009, ApJS, 180, 330,
doi: 10.1088/0067-0049/180/2/330. Table 1 (WMAP + BAO + SN ML).
"""

# Note: if you add a new cosmology, please also update the table
# in the 'Built-in Cosmologies' section of astropy/docs/cosmology/index.rst
# in addition to the list above.  You also need to add them to the
# 'available' list at the bottom of this file.

# Planck 2015 paper XIII Table 4 final column (best fit)
Planck15 = dict(
    Oc0=0.2589,
    Ob0=0.04860,
    Om0=0.3075,
    H0=67.74,
    n=0.9667,
    sigma8=0.8159,
    tau=0.066,
    z_reion=8.8,
    t0=13.799,
    Tcmb0=2.7255,
    Neff=3.046,
    flat=True,
    m_nu=[0., 0., 0.06],
    reference=("Planck Collaboration 2016, A&A, 594, A13 (Paper XIII),"
               " Table 4 (TT, TE, EE + lowP + lensing + ext)")
)

# Planck 2013 paper XVI Table 5 penultimate column (best fit)
Planck13 = dict(
    Oc0=0.25886,
    Ob0=0.048252,
    Om0=0.30712,
    H0=67.77,
    n=0.9611,
    sigma8=0.8288,
    tau=0.0952,
    z_reion=11.52,
    t0=13.7965,
    Tcmb0=2.7255,
    Neff=3.046,
    flat=True,
    m_nu=[0., 0., 0.06],
    reference=("Planck Collaboration 2014, A&A, 571, A16 (Paper XVI),"
               " Table 5 (Planck + WP + highL + BAO)")
)

WMAP9 = dict(
    Oc0=0.2402,
    Ob0=0.04628,
    Om0=0.2865,
    H0=69.32,
    n=0.9608,
    sigma8=0.820,
    tau=0.081,
    z_reion=10.1,
    t0=13.772,
    Tcmb0=2.725,
    Neff=3.04,
    m_nu=0.0,
    flat=True,
    reference=("Hinshaw et al. 2013, ApJS, 208, 19, "
               "doi: 10.1088/0067-0049/208/2/19. "
               "Table 4 (WMAP9 + eCMB + BAO + H0, last column)")
)

WMAP7 = dict(
    Oc0=0.226,
    Ob0=0.0455,
    Om0=0.272,
    H0=70.4,
    n=0.967,
    sigma8=0.810,
    tau=0.085,
    z_reion=10.3,
    t0=13.76,
    Tcmb0=2.725,
    Neff=3.04,
    m_nu=0.0,
    flat=True,
    reference=("Komatsu et al. 2011, ApJS, 192, 18, "
               "doi: 10.1088/0067-0049/192/2/18. "
               "Table 1 (WMAP + BAO + H0 ML).")
)

WMAP5 = dict(
    Oc0=0.231,
    Ob0=0.0459,
    Om0=0.277,
    H0=70.2,
    n=0.962,
    sigma8=0.817,
    tau=0.088,
    z_reion=11.3,
    t0=13.72,
    Tcmb0=2.725,
    Neff=3.04,
    m_nu=0.0,
    flat=True,
    reference=("Komatsu et al. 2009, ApJS, 180, 330, "
               "doi: 10.1088/0067-0049/180/2/330. "
               "Table 1 (WMAP + BAO + SN ML).")
)

# If new parameters are added, this list must be updated
available = ['Planck15', 'Planck13', 'WMAP9', 'WMAP7', 'WMAP5']
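These parameter dictionaries are plain data. A sketch of roughly how one becomes a usable cosmology object; this mirrors, but is not, the internal realization-building code in astropy.cosmology:

# --- usage sketch (not part of the module) ---
from astropy import units as u
from astropy.cosmology import FlatLambdaCDM
from astropy.cosmology.parameters import WMAP9 as p

cosmo = FlatLambdaCDM(H0=p['H0'], Om0=p['Om0'], Tcmb0=p['Tcmb0'],
                      Neff=p['Neff'], m_nu=u.Quantity(p['m_nu'], u.eV),
                      Ob0=p['Ob0'], name='WMAP9')
print(cosmo.age(0))  # close to the tabulated t0 of 13.772 Gyr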
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import sys
from math import sqrt, pi, exp, log, floor
from abc import ABCMeta, abstractmethod
from inspect import signature

import numpy as np

from . import scalar_inv_efuncs
from .. import constants as const
from .. import units as u
from ..utils import isiterable
from ..utils.state import ScienceState

from . import parameters

# Originally authored by Andrew Becker ([email protected]),
# and modified by Neil Crighton ([email protected]) and Roban
# Kramer ([email protected]).

# Many of these adapted from Hogg 1999, astro-ph/9905116
# and Linder 2003, PRL 90, 91301

__all__ = ["FLRW", "LambdaCDM", "FlatLambdaCDM", "wCDM", "FlatwCDM",
           "Flatw0waCDM", "w0waCDM", "wpwaCDM", "w0wzCDM",
           "default_cosmology"] + parameters.available

__doctest_requires__ = {'*': ['scipy.integrate', 'scipy.special']}

# Notes about speeding up integrals:
# ---------------------------------
# The supplied cosmology classes use a few tricks to speed
# up distance and time integrals.  It is not necessary for
# anyone subclassing FLRW to use these tricks -- but if they
# do, such calculations may be a lot faster.
# The first, more basic, idea is that, in many cases, it's a big deal to
# provide explicit formulae for inv_efunc rather than simply
# setting up de_energy_scale -- assuming there is a nice expression.
# As noted above, almost all of the provided classes do this, and
# that template can pretty much be followed directly with the appropriate
# formula changes.
# The second, and more advanced, option is to also explicitly
# provide a scalar only version of inv_efunc.  This results in a fairly
# large speedup (>10x in most cases) in the distance and age integrals,
# even if only done in python, because testing whether the inputs are
# iterable or pure scalars turns out to be rather expensive.  To take
# advantage of this, the key thing is to explicitly set the
# instance variables self._inv_efunc_scalar and self._inv_efunc_scalar_args
# in the constructor for the subclass, where the latter are all the
# arguments except z to _inv_efunc_scalar.
#
# The provided classes do use this optimization, and in fact go
# even further and provide optimizations for no radiation, and for
# radiation with massless neutrinos coded in cython.  Consult the
# subclasses for details, and scalar_inv_efuncs for the details.
#
# However, the important point is that it is -not- necessary to do this.

# Some conversion constants -- useful to compute them once here
# and reuse in the initialization rather than have every object do them.
# Note that the call to cgs is actually extremely expensive,
# so we actually skip using the units package directly, and
# hardwire the conversion from mks to cgs.  This assumes that constants
# will always return mks by default -- if this is made faster for simple
# cases like this, it should be changed back.
# Note that the unit tests should catch it if this happens.
H0units_to_invs = (u.km / (u.s * u.Mpc)).to(1.0 / u.s)
sec_to_Gyr = u.s.to(u.Gyr)
# const in critical density in cgs units (g cm^-3)
critdens_const = 3. / (8. * pi * const.G.value * 1000)
arcsec_in_radians = pi / (3600. * 180)
arcmin_in_radians = pi / (60. * 180)
# Radiation parameter over c^2 in cgs (g cm^-3 K^-4)
a_B_c2 = 4e-3 * const.sigma_sb.value / const.c.value ** 3
# Boltzmann constant in eV / K
kB_evK = const.k_B.to(u.eV / u.K)


class CosmologyError(Exception):
    pass


class Cosmology:
    """ Placeholder for when a more general Cosmology class is
    implemented. """


class FLRW(Cosmology, metaclass=ABCMeta):
    """ A class describing an isotropic and homogeneous
    (Friedmann-Lemaitre-Robertson-Walker) cosmology.

    This is an abstract base class -- you can't instantiate
    examples of this class, but must work with one of its
    subclasses such as `LambdaCDM` or `wCDM`.

    Parameters
    ----------
    H0 : float or scalar `~astropy.units.Quantity`
        Hubble constant at z = 0.  If a float, must be in [km/sec/Mpc]

    Om0 : float
        Omega matter: density of non-relativistic matter in units of the
        critical density at z=0.  Note that this does not include
        massive neutrinos.

    Ode0 : float
        Omega dark energy: density of dark energy in units of the critical
        density at z=0.

    Tcmb0 : float or scalar `~astropy.units.Quantity`, optional
        Temperature of the CMB at z=0.  If a float, must be in [K].
        Default: 0 [K].  Setting this to zero will turn off both photons
        and neutrinos (even massive ones).

    Neff : float, optional
        Effective number of Neutrino species.  Default 3.04.

    m_nu : `~astropy.units.Quantity`, optional
        Mass of each neutrino species.  If this is a scalar Quantity, then
        all neutrino species are assumed to have that mass.  Otherwise, the
        mass of each species.  The actual number of neutrino species (and
        hence the number of elements of m_nu if it is not scalar) must be
        the floor of Neff.  Typically this means you should provide three
        neutrino masses unless you are considering something like a sterile
        neutrino.

    Ob0 : float or None, optional
        Omega baryons: density of baryonic matter in units of the critical
        density at z=0.  If this is set to None (the default), any
        computation that requires its value will raise an exception.

    name : str, optional
        Name for this cosmological object.

    Notes
    -----
    Class instances are static -- you can't change the values
    of the parameters.  That is, all of the attributes above are
    read only.
    """

    def __init__(self, H0, Om0, Ode0, Tcmb0=0, Neff=3.04,
                 m_nu=u.Quantity(0.0, u.eV), Ob0=None, name=None):

        # all densities are in units of the critical density
        self._Om0 = float(Om0)
        if self._Om0 < 0.0:
            raise ValueError("Matter density can not be negative")
        self._Ode0 = float(Ode0)
        if Ob0 is not None:
            self._Ob0 = float(Ob0)
            if self._Ob0 < 0.0:
                raise ValueError("Baryonic density can not be negative")
            if self._Ob0 > self._Om0:
                raise ValueError("Baryonic density can not be larger than "
                                 "total matter density")
            self._Odm0 = self._Om0 - self._Ob0
        else:
            self._Ob0 = None
            self._Odm0 = None

        self._Neff = float(Neff)
        if self._Neff < 0.0:
            raise ValueError("Effective number of neutrinos can "
                             "not be negative")
        self.name = name

        # Tcmb may have units
        self._Tcmb0 = u.Quantity(Tcmb0, unit=u.K)
        if not self._Tcmb0.isscalar:
            raise ValueError("Tcmb0 is a non-scalar quantity")

        # Hubble parameter at z=0, km/s/Mpc
        self._H0 = u.Quantity(H0, unit=u.km / u.s / u.Mpc)
        if not self._H0.isscalar:
            raise ValueError("H0 is a non-scalar quantity")

        # 100 km/s/Mpc * h = H0 (so h is dimensionless)
        self._h = self._H0.value / 100.
        # Hubble distance
        self._hubble_distance = (const.c / self._H0).to(u.Mpc)
        # H0 in s^-1; don't use units for speed
        H0_s = self._H0.value * H0units_to_invs
        # Hubble time; again, avoiding units package for speed
        self._hubble_time = u.Quantity(sec_to_Gyr / H0_s, u.Gyr)

        # critical density at z=0 (grams per cubic cm)
        cd0value = critdens_const * H0_s ** 2
        self._critical_density0 = u.Quantity(cd0value, u.g / u.cm ** 3)

        # Load up neutrino masses.  Note: in Py2.x, floor is floating
        self._nneutrinos = int(floor(self._Neff))

        # We are going to share Neff between the neutrinos equally.
        # In detail this is not correct, but it is a standard assumption
        # because properly calculating it is a) complicated b) depends
        # on the details of the massive neutrinos (e.g., their weak
        # interactions, which could be unusual if one is considering
        # sterile neutrinos)
        self._massivenu = False
        if self._nneutrinos > 0 and self._Tcmb0.value > 0:
            self._neff_per_nu = self._Neff / self._nneutrinos

            # We can't use the u.Quantity constructor as we do above
            # because it doesn't understand equivalencies
            if not isinstance(m_nu, u.Quantity):
                raise ValueError("m_nu must be a Quantity")

            m_nu = m_nu.to(u.eV, equivalencies=u.mass_energy())

            # Now, figure out if we have massive neutrinos to deal with,
            # and, if so, get the right number of masses.
            # It is worth the effort to keep track of massless ones
            # separately (since they are quite easy to deal with, and a
            # common use case is to set only one neutrino to have mass)
            if m_nu.isscalar:
                # Assume all neutrinos have the same mass
                if m_nu.value == 0:
                    self._nmasslessnu = self._nneutrinos
                    self._nmassivenu = 0
                else:
                    self._massivenu = True
                    self._nmasslessnu = 0
                    self._nmassivenu = self._nneutrinos
                    self._massivenu_mass = (m_nu.value *
                                            np.ones(self._nneutrinos))
            else:
                # Make sure we have the right number of masses
                # -unless- they are massless, in which case we cheat a little
                if m_nu.value.min() < 0:
                    raise ValueError("Invalid (negative) neutrino mass"
                                     " encountered")
                if m_nu.value.max() == 0:
                    self._nmasslessnu = self._nneutrinos
                    self._nmassivenu = 0
                else:
                    self._massivenu = True
                    if len(m_nu) != self._nneutrinos:
                        errstr = "Unexpected number of neutrino masses"
                        raise ValueError(errstr)
                    # Segregate out the massless ones
                    self._nmasslessnu = len(np.nonzero(m_nu.value == 0)[0])
                    self._nmassivenu = self._nneutrinos - self._nmasslessnu
                    w = np.nonzero(m_nu.value > 0)[0]
                    self._massivenu_mass = m_nu[w]

        # Compute photon density, Tcmb, neutrino parameters.
        # Tcmb0=0 removes both photons and neutrinos, and is handled
        # as a special case for efficiency
        if self._Tcmb0.value > 0:
            # Compute photon density from Tcmb
            self._Ogamma0 = a_B_c2 * self._Tcmb0.value ** 4 /\
                self._critical_density0.value

            # Compute Neutrino temperature.
            # The constant in front is (4/11)^1/3 -- see any
            # cosmology book for an explanation -- for example,
            # Weinberg 'Cosmology' p 154 eq (3.1.21)
            self._Tnu0 = 0.7137658555036082 * self._Tcmb0

            # Compute Neutrino Omega and total relativistic component
            # for massive neutrinos.  We also store a list version,
            # since that is more efficient to do integrals with (perhaps
            # surprisingly!  But small python lists are more efficient
            # than small numpy arrays).
            if self._massivenu:
                nu_y = self._massivenu_mass / (kB_evK * self._Tnu0)
                self._nu_y = nu_y.value
                self._nu_y_list = self._nu_y.tolist()
                self._Onu0 = self._Ogamma0 * self.nu_relative_density(0)
            else:
                # This case is particularly simple, so do it directly.
                # The 0.2271... is 7/8 (4/11)^(4/3) -- the temperature
                # bit ^4 (blackbody energy density) times 7/8 for
                # FD vs. BE statistics.
                self._Onu0 = 0.22710731766 * self._Neff * self._Ogamma0

        else:
            self._Ogamma0 = 0.0
            self._Tnu0 = u.Quantity(0.0, u.K)
            self._Onu0 = 0.0

        # Compute curvature density
        self._Ok0 = 1.0 - self._Om0 - self._Ode0 - self._Ogamma0 - self._Onu0

        # Subclasses should override this reference if they provide
        # more efficient scalar versions of inv_efunc.
        self._inv_efunc_scalar = self.inv_efunc
        self._inv_efunc_scalar_args = ()

    def _namelead(self):
        """ Helper function for constructing __repr__"""
        if self.name is None:
            return "{0}(".format(self.__class__.__name__)
        else:
            return "{0}(name=\"{1}\", ".format(self.__class__.__name__,
                                               self.name)

    def __repr__(self):
        retstr = "{0}H0={1:.3g}, Om0={2:.3g}, Ode0={3:.3g}, "\
                 "Tcmb0={4:.4g}, Neff={5:.3g}, m_nu={6}, "\
                 "Ob0={7:s})"
        return retstr.format(self._namelead(), self._H0, self._Om0,
                             self._Ode0, self._Tcmb0, self._Neff,
                             self.m_nu, _float_or_none(self._Ob0))

    # Set up a set of properties for H0, Om0, Ode0, Ok0, etc. for user
    # access.  Note that we don't let these be set (so, obj.Om0 = value
    # fails)

    @property
    def H0(self):
        """ Return the Hubble constant as an `~astropy.units.Quantity` at
        z=0"""
        return self._H0

    @property
    def Om0(self):
        """ Omega matter; matter density/critical density at z=0"""
        return self._Om0

    @property
    def Ode0(self):
        """ Omega dark energy; dark energy density/critical density at z=0"""
        return self._Ode0

    @property
    def Ob0(self):
        """ Omega baryon; baryonic matter density/critical density at z=0"""
        return self._Ob0

    @property
    def Odm0(self):
        """ Omega dark matter; dark matter density/critical density at z=0"""
        return self._Odm0

    @property
    def Ok0(self):
        """ Omega curvature; the effective curvature density/critical density
        at z=0"""
        return self._Ok0

    @property
    def Tcmb0(self):
        """ Temperature of the CMB as `~astropy.units.Quantity` at z=0"""
        return self._Tcmb0

    @property
    def Tnu0(self):
        """ Temperature of the neutrino background as
        `~astropy.units.Quantity` at z=0"""
        return self._Tnu0

    @property
    def Neff(self):
        """ Number of effective neutrino species"""
        return self._Neff

    @property
    def has_massive_nu(self):
        """ Does this cosmology have at least one massive neutrino
        species?"""
        if self._Tnu0.value == 0:
            return False
        return self._massivenu

    @property
    def m_nu(self):
        """ Mass of neutrino species"""
        if self._Tnu0.value == 0:
            return None
        if not self._massivenu:
            # Only massless
            return u.Quantity(np.zeros(self._nmasslessnu), u.eV)
        if self._nmasslessnu == 0:
            # Only massive
            return u.Quantity(self._massivenu_mass, u.eV)
        # A mix -- the most complicated case
        numass = np.append(np.zeros(self._nmasslessnu),
                           self._massivenu_mass.value)
        return u.Quantity(numass, u.eV)

    @property
    def h(self):
        """ Dimensionless Hubble constant: h = H_0 / 100 [km/sec/Mpc]"""
        return self._h

    @property
    def hubble_time(self):
        """ Hubble time as `~astropy.units.Quantity`"""
        return self._hubble_time

    @property
    def hubble_distance(self):
        """ Hubble distance as `~astropy.units.Quantity`"""
        return self._hubble_distance

    @property
    def critical_density0(self):
        """ Critical density as `~astropy.units.Quantity` at z=0"""
        return self._critical_density0

    @property
    def Ogamma0(self):
        """ Omega gamma; the density/critical density of photons at z=0"""
        return self._Ogamma0

    @property
    def Onu0(self):
        """ Omega nu; the density/critical density of neutrinos at z=0"""
        return self._Onu0

    def clone(self, **kwargs):
        """ Returns a copy of this object, potentially with some changes.

        Returns
        -------
        newcos : Subclass of FLRW
            A new instance of this class with the specified changes.

        Notes
        -----
        This assumes that the values of all constructor arguments
        are available as properties, which is true of all the provided
        subclasses but may not be true of user-provided ones.  You can't
        change the type of class, so this can't be used to change between
        flat and non-flat.  If no modifications are requested, then
        a reference to this object is returned.

        Examples
        --------
        To make a copy of the Planck13 cosmology with a different Omega_m
        and a new name:

        >>> from astropy.cosmology import Planck13
        >>> newcos = Planck13.clone(name="Modified Planck 2013", Om0=0.35)
        """

        # Quick return check, taking advantage of the
        # immutability of cosmological objects
        if len(kwargs) == 0:
            return self

        # Get constructor arguments
        arglist = signature(self.__init__).parameters.keys()

        # Build the dictionary of values used to construct this
        # object.  This -assumes- every argument to __init__ has a
        # property.  This is true of all the classes we provide, but
        # maybe a user won't do that.  So at least try to have a useful
        # error message.
        argdict = {}
        for arg in arglist:
            try:
                val = getattr(self, arg)
                argdict[arg] = val
            except AttributeError:
                # We didn't find a property -- complain usefully
                errstr = "Object did not have property corresponding "\
                         "to constructor argument '{}'; perhaps it is a "\
                         "user provided subclass that does not do so"
                raise AttributeError(errstr.format(arg))

        # Now substitute in new arguments
        for newarg in kwargs:
            if newarg not in argdict:
                errstr = "User provided argument '{}' not found in "\
                         "constructor for this object"
                raise AttributeError(errstr.format(newarg))
            argdict[newarg] = kwargs[newarg]

        return self.__class__(**argdict)

    @abstractmethod
    def w(self, z):
        """ The dark energy equation of state.

        Parameters
        ----------
        z : array-like
            Input redshifts.

        Returns
        -------
        w : ndarray, or float if input scalar
            The dark energy equation of state

        Notes
        -----
        The dark energy equation of state is defined as
        :math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the
        pressure at redshift z and :math:`\\rho(z)` is the density
        at redshift z, both in units where c=1.

        This must be overridden by subclasses.
        """
        raise NotImplementedError("w(z) is not implemented")

    def Om(self, z):
        """ Return the density parameter for non-relativistic matter
        at redshift ``z``.

        Parameters
        ----------
        z : array-like
            Input redshifts.

        Returns
        -------
        Om : ndarray, or float if input scalar
            The density of non-relativistic matter relative to the critical
            density at each redshift.

        Notes
        -----
        This does not include neutrinos, even if non-relativistic
        at the redshift of interest; see `Onu`.
        """
        if isiterable(z):
            z = np.asarray(z)
        return self._Om0 * (1. + z) ** 3 * self.inv_efunc(z) ** 2

    def Ob(self, z):
        """ Return the density parameter for baryonic matter at redshift
        ``z``.

        Parameters
        ----------
        z : array-like
            Input redshifts.

        Returns
        -------
        Ob : ndarray, or float if input scalar
            The density of baryonic matter relative to the critical density
            at each redshift.

        Raises
        ------
        ValueError
            If Ob0 is None.
        """
        if self._Ob0 is None:
            raise ValueError("Baryon density not set for this cosmology")
        if isiterable(z):
            z = np.asarray(z)
        return self._Ob0 * (1. + z) ** 3 * self.inv_efunc(z) ** 2

    def Odm(self, z):
        """ Return the density parameter for dark matter at redshift ``z``.

        Parameters
        ----------
        z : array-like
            Input redshifts.

        Returns
        -------
        Odm : ndarray, or float if input scalar
            The density of non-relativistic dark matter relative to the
            critical density at each redshift.

        Raises
        ------
        ValueError
            If Ob0 is None.

        Notes
        -----
        This does not include neutrinos, even if non-relativistic at the
        redshift of interest.
        """
        if self._Odm0 is None:
            raise ValueError("Baryonic density not set for this cosmology, "
                             "unclear meaning of dark matter density")
        if isiterable(z):
            z = np.asarray(z)
        return self._Odm0 * (1. + z) ** 3 * self.inv_efunc(z) ** 2

    def Ok(self, z):
        """ Return the equivalent density parameter for curvature
        at redshift ``z``.

        Parameters
        ----------
        z : array-like
            Input redshifts.

        Returns
        -------
        Ok : ndarray, or float if input scalar
            The equivalent density parameter for curvature at each redshift.
        """
        if isiterable(z):
            z = np.asarray(z)
            # Common enough case to be worth checking explicitly
            if self._Ok0 == 0:
                return np.zeros(np.asanyarray(z).shape)
        else:
            if self._Ok0 == 0:
                return 0.0
        return self._Ok0 * (1. + z) ** 2 * self.inv_efunc(z) ** 2

    def Ode(self, z):
        """ Return the density parameter for dark energy at redshift ``z``.

        Parameters
        ----------
        z : array-like
            Input redshifts.

        Returns
        -------
        Ode : ndarray, or float if input scalar
            The density of dark energy relative to the critical density at
            each redshift.
        """
        if isiterable(z):
            z = np.asarray(z)
            # Common case worth checking
            if self._Ode0 == 0:
                return np.zeros(np.asanyarray(z).shape)
        else:
            if self._Ode0 == 0:
                return 0.0
        return self._Ode0 * self.de_density_scale(z) * self.inv_efunc(z) ** 2

    def Ogamma(self, z):
        """ Return the density parameter for photons at redshift ``z``.

        Parameters
        ----------
        z : array-like
            Input redshifts.

        Returns
        -------
        Ogamma : ndarray, or float if input scalar
            The energy density of photons relative to the critical density
            at each redshift.
        """
        if isiterable(z):
            z = np.asarray(z)
        return self._Ogamma0 * (1. + z) ** 4 * self.inv_efunc(z) ** 2

    def Onu(self, z):
        """ Return the density parameter for neutrinos at redshift ``z``.

        Parameters
        ----------
        z : array-like
            Input redshifts.

        Returns
        -------
        Onu : ndarray, or float if input scalar
            The energy density of neutrinos relative to the critical density
            at each redshift.  Note that this includes their kinetic energy
            (if they have mass), so it is not equal to the commonly used
            :math:`\\sum \\frac{m_{\\nu}}{94 eV}`, which does not include
            kinetic energy.
        """
        if isiterable(z):
            z = np.asarray(z)
            if self._Onu0 == 0:
                return np.zeros(np.asanyarray(z).shape)
        else:
            if self._Onu0 == 0:
                return 0.0
        return self.Ogamma(z) * self.nu_relative_density(z)

    def Tcmb(self, z):
        """ Return the CMB temperature at redshift ``z``.

        Parameters
        ----------
        z : array-like
            Input redshifts.

        Returns
        -------
        Tcmb : `~astropy.units.Quantity`
            The temperature of the CMB in K.
        """
        if isiterable(z):
            z = np.asarray(z)
        return self._Tcmb0 * (1. + z)

    def Tnu(self, z):
        """ Return the neutrino temperature at redshift ``z``.

        Parameters
        ----------
        z : array-like
            Input redshifts.

        Returns
        -------
        Tnu : `~astropy.units.Quantity`
            The temperature of the cosmic neutrino background in K.
        """
        if isiterable(z):
            z = np.asarray(z)
        return self._Tnu0 * (1. + z)

    def nu_relative_density(self, z):
        """ Neutrino density function relative to the energy density in
        photons.

        Parameters
        ----------
        z : array-like
            Redshift

        Returns
        -------
        f : ndarray, or float if z is scalar
            The neutrino density scaling factor relative to the density
            in photons at each redshift

        Notes
        -----
        The density in neutrinos is given by

        .. math::

           \\rho_{\\nu} \\left(a\\right) = 0.2271 \\, N_{eff} \\,
           f\\left(m_{\\nu} a / T_{\\nu 0} \\right) \\,
           \\rho_{\\gamma} \\left( a \\right)

        where

        .. math::

           f \\left(y\\right) = \\frac{120}{7 \\pi^4}
           \\int_0^{\\infty} \\, dx \\frac{x^2 \\sqrt{x^2 + y^2}}
           {e^x + 1}

        assuming that all neutrino species have the same mass.
        If they have different masses, a similar term is calculated
        for each one.  Note that f has the asymptotic behavior
        :math:`f(0) = 1`.  This method returns :math:`0.2271 f` using an
        analytical fitting formula given in Komatsu et al. 2011, ApJS
        192, 18.
        """

        # Note that there is also a scalar-z-only cython implementation of
        # this in scalar_inv_efuncs.pyx, so if you find a problem in this
        # you need to update there too.

        # See Komatsu et al. 2011, eq 26 and the surrounding discussion
        # for an explanation of what we are doing here.
        # However, this is modified to handle multiple neutrino masses
        # by computing the above for each mass, then summing
        prefac = 0.22710731766  # 7/8 (4/11)^4/3 -- see any cosmo book

        # The massive and massless contribution must be handled separately.
        # But check for common cases first
        if not self._massivenu:
            if np.isscalar(z):
                return prefac * self._Neff
            else:
                return prefac * self._Neff * np.ones(np.asanyarray(z).shape)

        # These are purely fitting constants -- see the Komatsu paper
        p = 1.83
        invp = 0.54644808743  # 1.0 / p
        k = 0.3173

        z = np.asarray(z)
        curr_nu_y = self._nu_y / (1. + np.expand_dims(z, axis=-1))
        rel_mass_per = (1.0 + (k * curr_nu_y) ** p) ** invp
        rel_mass = rel_mass_per.sum(-1) + self._nmasslessnu

        return prefac * self._neff_per_nu * rel_mass

    def _w_integrand(self, ln1pz):
        """ Internal convenience function for w(z) integral."""

        # See Linder 2003, PRL 90, 91301 eq (5)
        # Assumes scalar input, since this should only be called
        # inside an integral
        z = exp(ln1pz) - 1.0
        return 1.0 + self.w(z)

    def de_density_scale(self, z):
        r""" Evaluates the redshift dependence of the dark energy density.

        Parameters
        ----------
        z : array-like
            Input redshifts.

        Returns
        -------
        I : ndarray, or float if input scalar
            The scaling of the energy density of dark energy with redshift.

        Notes
        -----
        The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
        and is given by

        .. math::

           I = \exp \left( 3 \int_{a}^1
                           \frac{ da^{\prime} }{ a^{\prime} }
                           \left[ 1 + w\left( a^{\prime} \right) \right]
                    \right)

        It will generally be helpful for subclasses to overload this method
        if the integral can be done analytically for the particular dark
        energy equation of state that they implement.
        """

        # This allows for an arbitrary w(z) following eq (5) of
        # Linder 2003, PRL 90, 91301.  The code here evaluates
        # the integral numerically.  However, most popular
        # forms of w(z) are designed to make this integral analytic,
        # so it is probably a good idea for subclasses to overload this
        # method if an analytic form is available.
        #
        # The integral we actually use (the one given in Linder)
        # is rewritten in terms of z, so looks slightly different than the
        # one in the documentation string, but it's the same thing.

        from scipy.integrate import quad

        if isiterable(z):
            z = np.asarray(z)
            ival = np.array([quad(self._w_integrand, 0,
                                  log(1 + redshift))[0]
                             for redshift in z])
            return np.exp(3 * ival)
        else:
            ival = quad(self._w_integrand, 0, log(1 + z))[0]
            return exp(3 * ival)

    def efunc(self, z):
        """ Function used to calculate H(z), the Hubble parameter.

        Parameters
        ----------
        z : array-like
            Input redshifts.

        Returns
        -------
        E : ndarray, or float if input scalar
            The redshift scaling of the Hubble constant.

        Notes
        -----
        The return value, E, is defined such that :math:`H(z) = H_0 E`.

        It is not necessary to override this method, but if de_density_scale
        takes a particularly simple form, it may be advantageous to.
        """
        if isiterable(z):
            z = np.asarray(z)

        Om0, Ode0, Ok0 = self._Om0, self._Ode0, self._Ok0
        if self._massivenu:
            Or = self._Ogamma0 * (1 + self.nu_relative_density(z))
        else:
            Or = self._Ogamma0 + self._Onu0
        zp1 = 1.0 + z

        return np.sqrt(zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) +
                       Ode0 * self.de_density_scale(z))

    def inv_efunc(self, z):
        """Inverse of efunc.

        Parameters
        ----------
        z : array-like
            Input redshifts.

        Returns
        -------
        E : ndarray, or float if input scalar
            The redshift scaling of the inverse Hubble constant.
        """
        # Avoid the function overhead by repeating code
        if isiterable(z):
            z = np.asarray(z)

        Om0, Ode0, Ok0 = self._Om0, self._Ode0, self._Ok0
        if self._massivenu:
            Or = self._Ogamma0 * (1 + self.nu_relative_density(z))
        else:
            Or = self._Ogamma0 + self._Onu0
        zp1 = 1.0 + z

        return (zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) +
                Ode0 * self.de_density_scale(z))**(-0.5)

    def _lookback_time_integrand_scalar(self, z):
        """ Integrand of the lookback time.

        Parameters
        ----------
        z : float
            Input redshift.

        Returns
        -------
        I : float
            The integrand for the lookback time

        References
        ----------
        Eqn 30 from Hogg 1999.
        """
        args = self._inv_efunc_scalar_args
        return self._inv_efunc_scalar(z, *args) / (1.0 + z)

    def lookback_time_integrand(self, z):
        """ Integrand of the lookback time.

        Parameters
        ----------
        z : float or array-like
            Input redshift.

        Returns
        -------
        I : float or array
            The integrand for the lookback time

        References
        ----------
        Eqn 30 from Hogg 1999.
        """
        if isiterable(z):
            zp1 = 1.0 + np.asarray(z)
        else:
            zp1 = 1. + z

        return self.inv_efunc(z) / zp1

    def _abs_distance_integrand_scalar(self, z):
        """ Integrand of the absorption distance.

        Parameters
        ----------
        z : float
            Input redshift.

        Returns
        -------
        X : float
            The integrand for the absorption distance

        References
        ----------
        See Hogg 1999 section 11.
        """
        args = self._inv_efunc_scalar_args
        return (1.0 + z) ** 2 * self._inv_efunc_scalar(z, *args)

    def abs_distance_integrand(self, z):
        """ Integrand of the absorption distance.

        Parameters
        ----------
        z : float or array
            Input redshift.

        Returns
        -------
        X : float or array
            The integrand for the absorption distance

        References
        ----------
        See Hogg 1999 section 11.
        """
        if isiterable(z):
            zp1 = 1.0 + np.asarray(z)
        else:
            zp1 = 1. + z
        return zp1 ** 2 * self.inv_efunc(z)

    def H(self, z):
        """ Hubble parameter (km/s/Mpc) at redshift ``z``.

        Parameters
        ----------
        z : array-like
            Input redshifts.

        Returns
        -------
        H : `~astropy.units.Quantity`
            Hubble parameter at each input redshift.
        """
        return self._H0 * self.efunc(z)

    def scale_factor(self, z):
        """ Scale factor at redshift ``z``.

        The scale factor is defined as :math:`a = 1 / (1 + z)`.

        Parameters
        ----------
        z : array-like
            Input redshifts.

        Returns
        -------
        a : ndarray, or float if input scalar
            Scale factor at each input redshift.
        """
        if isiterable(z):
            z = np.asarray(z)

        return 1. / (1. + z)

    def lookback_time(self, z):
        """ Lookback time in Gyr to redshift ``z``.

        The lookback time is the difference between the age of the
        Universe now and the age at redshift ``z``.

        Parameters
        ----------
        z : array-like
            Input redshifts.  Must be 1D or scalar

        Returns
        -------
        t : `~astropy.units.Quantity`
            Lookback time in Gyr to each input redshift.

        See Also
        --------
        z_at_value : Find the redshift corresponding to a lookback time.
        """
        return self._lookback_time(z)

    def _lookback_time(self, z):
        """ Lookback time in Gyr to redshift ``z``.

        The lookback time is the difference between the age of the
        Universe now and the age at redshift ``z``.

        Parameters
        ----------
        z : array-like
            Input redshifts.  Must be 1D or scalar

        Returns
        -------
        t : `~astropy.units.Quantity`
            Lookback time in Gyr to each input redshift.
        """
        return self._integral_lookback_time(z)

    def _integral_lookback_time(self, z):
        """ Lookback time in Gyr to redshift ``z``.

        The lookback time is the difference between the age of the
        Universe now and the age at redshift ``z``.

        Parameters
        ----------
        z : array-like
            Input redshifts.  Must be 1D or scalar

        Returns
        -------
        t : `~astropy.units.Quantity`
            Lookback time in Gyr to each input redshift.
        """
        from scipy.integrate import quad
        f = lambda red: quad(self._lookback_time_integrand_scalar,
                             0, red)[0]
        return self._hubble_time * vectorize_if_needed(f, z)

    def lookback_distance(self, z):
        """ The lookback distance is the light travel time distance to a
        given redshift.  It is simply c * lookback_time.  It may be used to
        calculate the proper distance between two redshifts, e.g. for the
        mean free path to ionizing radiation.

        Parameters
        ----------
        z : array-like
            Input redshifts.  Must be 1D or scalar

        Returns
        -------
        d : `~astropy.units.Quantity`
            Lookback distance in Mpc
        """
        return (self.lookback_time(z) * const.c).to(u.Mpc)

    def age(self, z):
        """ Age of the universe in Gyr at redshift ``z``.

        Parameters
        ----------
        z : array-like
            Input redshifts.  Must be 1D or scalar.

        Returns
        -------
        t : `~astropy.units.Quantity`
            The age of the universe in Gyr at each input redshift.

        See Also
        --------
        z_at_value : Find the redshift corresponding to an age.
        """
        return self._age(z)

    def _age(self, z):
        """ Age of the universe in Gyr at redshift ``z``.

        This internal function exists to be re-defined for optimizations.

        Parameters
        ----------
        z : array-like
            Input redshifts.  Must be 1D or scalar.

        Returns
        -------
        t : `~astropy.units.Quantity`
            The age of the universe in Gyr at each input redshift.
        """
        return self._integral_age(z)

    def _integral_age(self, z):
        """ Age of the universe in Gyr at redshift ``z``.

        Calculated using explicit integration.

        Parameters
        ----------
        z : array-like
            Input redshifts.  Must be 1D or scalar.

        Returns
        -------
        t : `~astropy.units.Quantity`
            The age of the universe in Gyr at each input redshift.

        See Also
        --------
        z_at_value : Find the redshift corresponding to an age.
        """
        from scipy.integrate import quad
        f = lambda red: quad(self._lookback_time_integrand_scalar,
                             red, np.inf)[0]
        return self._hubble_time * vectorize_if_needed(f, z)

    def critical_density(self, z):
        """ Critical density in grams per cubic cm at redshift ``z``.

        Parameters
        ----------
        z : array-like
            Input redshifts.

        Returns
        -------
        rho : `~astropy.units.Quantity`
            Critical density in g/cm^3 at each input redshift.
        """
        return self._critical_density0 * (self.efunc(z)) ** 2

    def comoving_distance(self, z):
        """ Comoving line-of-sight distance in Mpc at a given redshift.

        The comoving distance along the line-of-sight between two
        objects remains constant with time for objects in the Hubble
        flow.

        Parameters
        ----------
        z : array-like
            Input redshifts.  Must be 1D or scalar.

        Returns
        -------
        d : `~astropy.units.Quantity`
            Comoving distance in Mpc to each input redshift.
        """
        return self._comoving_distance_z1z2(0, z)

    def _comoving_distance_z1z2(self, z1, z2):
        """ Comoving line-of-sight distance in Mpc between objects at
        redshifts z1 and z2.

        The comoving distance along the line-of-sight between two
        objects remains constant with time for objects in the Hubble
        flow.

        Parameters
        ----------
        z1, z2 : array-like, shape (N,)
            Input redshifts.  Must be 1D or scalar.

        Returns
        -------
        d : `~astropy.units.Quantity`
            Comoving distance in Mpc between each input redshift.
        """
        return self._integral_comoving_distance_z1z2(z1, z2)

    def _integral_comoving_distance_z1z2(self, z1, z2):
        """ Comoving line-of-sight distance in Mpc between objects at
        redshifts z1 and z2.

        The comoving distance along the line-of-sight between two
        objects remains constant with time for objects in the Hubble
        flow.

        Parameters
        ----------
        z1, z2 : array-like, shape (N,)
            Input redshifts.  Must be 1D or scalar.

        Returns
        -------
        d : `~astropy.units.Quantity`
            Comoving distance in Mpc between each input redshift.
        """
        from scipy.integrate import quad
        f = lambda z1, z2: quad(self._inv_efunc_scalar, z1, z2,
                                args=self._inv_efunc_scalar_args)[0]
        return self._hubble_distance * vectorize_if_needed(f, z1, z2)

    def comoving_transverse_distance(self, z):
        """ Comoving transverse distance in Mpc at a given redshift.

        This value is the transverse comoving distance at redshift ``z``
        corresponding to an angular separation of 1 radian.  This is
        the same as the comoving distance if omega_k is zero (as in
        the current concordance lambda CDM model).

        Parameters
        ----------
        z : array-like
            Input redshifts.  Must be 1D or scalar.

        Returns
        -------
        d : `~astropy.units.Quantity`
            Comoving transverse distance in Mpc at each input redshift.

        Notes
        -----
        This quantity is also called the 'proper motion distance' in some
        texts.
        """
        return self._comoving_transverse_distance_z1z2(0, z)

    def _comoving_transverse_distance_z1z2(self, z1, z2):
        """Comoving transverse distance in Mpc between two redshifts.

        This value is the transverse comoving distance at redshift ``z2``
        as seen from redshift ``z1`` corresponding to an angular
        separation of 1 radian.  This is the same as the comoving
        distance if omega_k is zero (as in the current concordance
        lambda CDM model).

        Parameters
        ----------
        z1, z2 : array-like, shape (N,)
            Input redshifts.  Must be 1D or scalar.

        Returns
        -------
        d : `~astropy.units.Quantity`
            Comoving transverse distance in Mpc between input redshift.

        Notes
        -----
        This quantity is also called the 'proper motion distance' in some
        texts.
        """
        Ok0 = self._Ok0
        dc = self._comoving_distance_z1z2(z1, z2)
        if Ok0 == 0:
            return dc
        sqrtOk0 = sqrt(abs(Ok0))
        dh = self._hubble_distance
        if Ok0 > 0:
            return dh / sqrtOk0 * np.sinh(sqrtOk0 * dc.value / dh.value)
        else:
            return dh / sqrtOk0 * np.sin(sqrtOk0 * dc.value / dh.value)

    def angular_diameter_distance(self, z):
        """ Angular diameter distance in Mpc at a given redshift.

        This gives the proper (sometimes called 'physical') transverse
        distance corresponding to an angle of 1 radian for an object
        at redshift ``z``.

        Weinberg, 1972, pp 421-424; Weedman, 1986, pp 65-67; Peebles,
        1993, pp 325-327.

        Parameters
        ----------
        z : array-like
            Input redshifts.  Must be 1D or scalar.

        Returns
        -------
        d : `~astropy.units.Quantity`
            Angular diameter distance in Mpc at each input redshift.
        """
        if isiterable(z):
            z = np.asarray(z)

        return self.comoving_transverse_distance(z) / (1. + z)

    def luminosity_distance(self, z):
        """ Luminosity distance in Mpc at redshift ``z``.

        This is the distance to use when converting between the
        bolometric flux from an object at redshift ``z`` and its
        bolometric luminosity.

        Parameters
        ----------
        z : array-like
            Input redshifts.  Must be 1D or scalar.

        Returns
        -------
        d : `~astropy.units.Quantity`
            Luminosity distance in Mpc at each input redshift.

        See Also
        --------
        z_at_value : Find the redshift corresponding to a luminosity
            distance.

        References
        ----------
        Weinberg, 1972, pp 420-424; Weedman, 1986, pp 60-62.
        """
        if isiterable(z):
            z = np.asarray(z)

        return (1. + z) * self.comoving_transverse_distance(z)

    def angular_diameter_distance_z1z2(self, z1, z2):
        """ Angular diameter distance between objects at 2 redshifts.
        Useful for gravitational lensing.

        Parameters
        ----------
        z1, z2 : array-like, shape (N,)
            Input redshifts.  z2 must be larger than z1.

        Returns
        -------
        d : `~astropy.units.Quantity`, shape (N,) or single if input scalar
            The angular diameter distance between each input redshift
            pair.
        """
        z1 = np.asanyarray(z1)
        z2 = np.asanyarray(z2)
        return self._comoving_transverse_distance_z1z2(z1, z2) / (1. + z2)

    def absorption_distance(self, z):
        """ Absorption distance at redshift ``z``.

        This is used to calculate the number of objects with some
        cross section of absorption and number density intersecting a
        sightline per unit redshift path.

        Parameters
        ----------
        z : array-like
            Input redshifts.  Must be 1D or scalar.

        Returns
        -------
        d : float or ndarray
            Absorption distance (dimensionless) at each input redshift.

        References
        ----------
        Hogg 1999 Section 11. (astro-ph/9905116)
        Bahcall, John N. and Peebles, P.J.E. 1969, ApJ, 156L, 7B
        """
        from scipy.integrate import quad
        f = lambda red: quad(self._abs_distance_integrand_scalar,
                             0, red)[0]
        return vectorize_if_needed(f, z)

    def distmod(self, z):
        """ Distance modulus at redshift ``z``.

        The distance modulus is defined as the (apparent magnitude -
        absolute magnitude) for an object at redshift ``z``.

        Parameters
        ----------
        z : array-like
            Input redshifts.  Must be 1D or scalar.

        Returns
        -------
        distmod : `~astropy.units.Quantity`
            Distance modulus at each input redshift, in magnitudes

        See Also
        --------
        z_at_value : Find the redshift corresponding to a distance modulus.
        """
        # Remember that the luminosity distance is in Mpc.
        # Abs is necessary because in certain obscure closed cosmologies
        # the distance modulus can be negative -- which is okay because
        # it enters as the square.
        val = 5. * np.log10(abs(self.luminosity_distance(z).value)) + 25.0
        return u.Quantity(val, u.mag)

    def comoving_volume(self, z):
        """ Comoving volume in cubic Mpc at redshift ``z``.

        This is the volume of the universe encompassed by redshifts less
        than ``z``.  For the case of omega_k = 0 it is a sphere of radius
        `comoving_distance` but it is less intuitive
        if omega_k is not 0.

        Parameters
        ----------
        z : array-like
            Input redshifts.  Must be 1D or scalar.

        Returns
        -------
        V : `~astropy.units.Quantity`
            Comoving volume in :math:`Mpc^3` at each input redshift.
        """
        Ok0 = self._Ok0
        if Ok0 == 0:
            return 4. / 3. * pi * self.comoving_distance(z) ** 3

        dh = self._hubble_distance.value  # .value for speed
        dm = self.comoving_transverse_distance(z).value
        term1 = 4. * pi * dh ** 3 / (2. * Ok0) * u.Mpc ** 3
        term2 = dm / dh * np.sqrt(1 + Ok0 * (dm / dh) ** 2)
        term3 = sqrt(abs(Ok0)) * dm / dh

        if Ok0 > 0:
            return term1 * (term2 -
                            1. / sqrt(abs(Ok0)) * np.arcsinh(term3))
        else:
            return term1 * (term2 -
                            1. / sqrt(abs(Ok0)) * np.arcsin(term3))

    def differential_comoving_volume(self, z):
        """Differential comoving volume at redshift z.

        Useful for calculating the effective comoving volume.
        For example, allows for integration over a comoving volume
        that has a sensitivity function that changes
        with redshift.  The total comoving volume is given by
        integrating differential_comoving_volume to redshift z
        and multiplying by a solid angle.

        Parameters
        ----------
        z : array-like
            Input redshifts.

        Returns
        -------
        dV : `~astropy.units.Quantity`
            Differential comoving volume per redshift per steradian at
            each input redshift."""
        dh = self._hubble_distance
        da = self.angular_diameter_distance(z)
        zp1 = 1.0 + z
        return dh * ((zp1 * da) ** 2.0) / u.Quantity(self.efunc(z),
                                                     u.steradian)

    def kpc_comoving_per_arcmin(self, z):
        """ Separation in transverse comoving kpc corresponding to an
        arcminute at redshift ``z``.

        Parameters
        ----------
        z : array-like
            Input redshifts.  Must be 1D or scalar.

        Returns
        -------
        d : `~astropy.units.Quantity`
            The distance in comoving kpc corresponding to an arcmin at each
            input redshift.
        """
        return (self.comoving_transverse_distance(z).to(u.kpc) *
                arcmin_in_radians / u.arcmin)

    def kpc_proper_per_arcmin(self, z):
        """ Separation in transverse proper kpc corresponding to an
        arcminute at redshift ``z``.

        Parameters
        ----------
        z : array-like
            Input redshifts.  Must be 1D or scalar.

        Returns
        -------
        d : `~astropy.units.Quantity`
            The distance in proper kpc corresponding to an arcmin at each
            input redshift.
        """
        return (self.angular_diameter_distance(z).to(u.kpc) *
                arcmin_in_radians / u.arcmin)

    def arcsec_per_kpc_comoving(self, z):
        """ Angular separation in arcsec corresponding to a comoving kpc
        at redshift ``z``.

        Parameters
        ----------
        z : array-like
            Input redshifts.  Must be 1D or scalar.

        Returns
        -------
        theta : `~astropy.units.Quantity`
            The angular separation in arcsec corresponding to a comoving
            kpc at each input redshift.
        """
        return u.arcsec / (self.comoving_transverse_distance(z).to(u.kpc) *
                           arcsec_in_radians)

    def arcsec_per_kpc_proper(self, z):
        """ Angular separation in arcsec corresponding to a proper kpc at
        redshift ``z``.

        Parameters
        ----------
        z : array-like
            Input redshifts.  Must be 1D or scalar.

        Returns
        -------
        theta : `~astropy.units.Quantity`
            The angular separation in arcsec corresponding to a proper kpc
            at each input redshift.
        """
        return u.arcsec / (self.angular_diameter_distance(z).to(u.kpc) *
                           arcsec_in_radians)
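As the abstract ``w`` method and the speed notes at the top of the file indicate, a custom dark-energy model only has to supply ``w(z)``; the default numeric ``de_density_scale`` integral then does the rest (at the cost of speed). A minimal sketch of such a subclass -- the class name and the w0=-0.9 default are illustrative, not astropy API:

# --- usage sketch (not part of the module) ---
import numpy as np
from astropy.cosmology import FLRW

class ConstantwSketch(FLRW):
    """Illustrative FLRW subclass with a constant equation of state."""

    def __init__(self, H0, Om0, Ode0, w0=-0.9, **kwargs):
        super().__init__(H0, Om0, Ode0, **kwargs)
        self._w0 = float(w0)

    def w(self, z):
        # Constant w; FLRW's numeric de_density_scale integral handles
        # the dark energy scaling from here.
        if np.isscalar(z):
            return self._w0
        return self._w0 * np.ones(np.asanyarray(z).shape)

cosmo = ConstantwSketch(70, 0.3, 0.7)
print(cosmo.comoving_distance(1.0))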
class LambdaCDM(FLRW):
    """FLRW cosmology with a cosmological constant and curvature.

    This has no additional attributes beyond those of FLRW.

    Parameters
    ----------
    H0 : float or `~astropy.units.Quantity`
        Hubble constant at z = 0.  If a float, must be in [km/sec/Mpc]

    Om0 : float
        Omega matter: density of non-relativistic matter in units of the
        critical density at z=0.

    Ode0 : float
        Omega dark energy: density of the cosmological constant in units
        of the critical density at z=0.

    Tcmb0 : float or scalar `~astropy.units.Quantity`, optional
        Temperature of the CMB at z=0.  If a float, must be in [K].
        Default: 0 [K].  Setting this to zero will turn off both photons
        and neutrinos (even massive ones).

    Neff : float, optional
        Effective number of Neutrino species.  Default 3.04.

    m_nu : `~astropy.units.Quantity`, optional
        Mass of each neutrino species.  If this is a scalar Quantity, then
        all neutrino species are assumed to have that mass.  Otherwise, the
        mass of each species.  The actual number of neutrino species (and
        hence the number of elements of m_nu if it is not scalar) must be
        the floor of Neff.  Typically this means you should provide three
        neutrino masses unless you are considering something like a sterile
        neutrino.

    Ob0 : float or None, optional
        Omega baryons: density of baryonic matter in units of the critical
        density at z=0.  If this is set to None (the default), any
        computation that requires its value will raise an exception.

    name : str, optional
        Name for this cosmological object.

    Examples
    --------
    >>> from astropy.cosmology import LambdaCDM
    >>> cosmo = LambdaCDM(H0=70, Om0=0.3, Ode0=0.7)

    The comoving distance in Mpc at redshift z:

    >>> z = 0.5
    >>> dc = cosmo.comoving_distance(z)
    """

    def __init__(self, H0, Om0, Ode0, Tcmb0=0, Neff=3.04,
                 m_nu=u.Quantity(0.0, u.eV), Ob0=None, name=None):
        FLRW.__init__(self, H0, Om0, Ode0, Tcmb0, Neff, m_nu, name=name,
                      Ob0=Ob0)

        # Please see "Notes about speeding up integrals" for discussion
        # about what is being done here.
        if self._Tcmb0.value == 0:
            self._inv_efunc_scalar = scalar_inv_efuncs.lcdm_inv_efunc_norel
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
                                           self._Ok0)
        elif not self._massivenu:
            self._inv_efunc_scalar = scalar_inv_efuncs.lcdm_inv_efunc_nomnu
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
                                           self._Ok0,
                                           self._Ogamma0 + self._Onu0)
        else:
            self._inv_efunc_scalar = scalar_inv_efuncs.lcdm_inv_efunc
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
                                           self._Ok0, self._Ogamma0,
                                           self._neff_per_nu,
                                           self._nmasslessnu,
                                           self._nu_y_list)

    def w(self, z):
        """Returns dark energy equation of state at redshift ``z``.

        Parameters
        ----------
        z : array-like
            Input redshifts.

        Returns
        -------
        w : ndarray, or float if input scalar
            The dark energy equation of state

        Notes
        -----
        The dark energy equation of state is defined as
        :math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the
        pressure at redshift z and :math:`\\rho(z)` is the density
        at redshift z, both in units where c=1.  Here this is
        :math:`w(z) = -1`.
        """
        if np.isscalar(z):
            return -1.0
        else:
            return -1.0 * np.ones(np.asanyarray(z).shape)

    def de_density_scale(self, z):
        """ Evaluates the redshift dependence of the dark energy density.

        Parameters
        ----------
        z : array-like
            Input redshifts.

        Returns
        -------
        I : ndarray, or float if input scalar
            The scaling of the energy density of dark energy with redshift.

        Notes
        -----
        The scaling factor, I, is defined by :math:`\\rho(z) = \\rho_0 I`,
        and in this case is given by :math:`I = 1`.
        """
        if np.isscalar(z):
            return 1.
        else:
            return np.ones(np.asanyarray(z).shape)

    def efunc(self, z):
        """ Function used to calculate H(z), the Hubble parameter.

        Parameters
        ----------
        z : array-like
            Input redshifts.

        Returns
        -------
        E : ndarray, or float if input scalar
            The redshift scaling of the Hubble constant.

        Notes
        -----
        The return value, E, is defined such that :math:`H(z) = H_0 E`.
        """
        if isiterable(z):
            z = np.asarray(z)

        # We override this because it takes a particularly simple
        # form for a cosmological constant
        Om0, Ode0, Ok0 = self._Om0, self._Ode0, self._Ok0
        if self._massivenu:
            Or = self._Ogamma0 * (1. + self.nu_relative_density(z))
        else:
            Or = self._Ogamma0 + self._Onu0
        zp1 = 1.0 + z

        return np.sqrt(zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) + Ode0)

    def inv_efunc(self, z):
        r""" Function used to calculate :math:`\frac{1}{H_z}`.

        Parameters
        ----------
        z : array-like
            Input redshifts.

        Returns
        -------
        E : ndarray, or float if input scalar
            The inverse redshift scaling of the Hubble constant.

        Notes
        -----
        The return value, E, is defined such that :math:`H_z = H_0 / E`.
        """
        if isiterable(z):
            z = np.asarray(z)

        Om0, Ode0, Ok0 = self._Om0, self._Ode0, self._Ok0
        if self._massivenu:
            Or = self._Ogamma0 * (1 + self.nu_relative_density(z))
        else:
            Or = self._Ogamma0 + self._Onu0
        zp1 = 1.0 + z

        return (zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) + Ode0)**(-0.5)
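Because `LambdaCDM` permits curvature, it is the class that exercises the ``Ok0 != 0`` branches of ``comoving_transverse_distance`` and ``comoving_volume`` above. A brief illustration, with arbitrary parameter choices:

# --- usage sketch (not part of the module) ---
from astropy.cosmology import LambdaCDM

open_cosmo = LambdaCDM(H0=70, Om0=0.3, Ode0=0.6)      # Ok0 = 0.1 > 0
print(open_cosmo.Ok0)
print(open_cosmo.comoving_transverse_distance(1.0))   # sinh branch
print(open_cosmo.comoving_volume(1.0))                # arcsinh branch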
Parameters ---------- H0 : float or `~astropy.units.Quantity` Hubble constant at z = 0. If a float, must be in [km/sec/Mpc] Om0 : float Omega matter: density of non-relativistic matter in units of the critical density at z=0. Tcmb0 : float or scalar `~astropy.units.Quantity`, optional Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K]. Setting this to zero will turn off both photons and neutrinos (even massive ones). Neff : float, optional Effective number of Neutrino species. Default 3.04. m_nu : `~astropy.units.Quantity`, optional Mass of each neutrino species. If this is a scalar Quantity, then all neutrino species are assumed to have that mass. Otherwise, the mass of each species. The actual number of neutrino species (and hence the number of elements of m_nu if it is not scalar) must be the floor of Neff. Typically this means you should provide three neutrino masses unless you are considering something like a sterile neutrino. Ob0 : float or None, optional Omega baryons: density of baryonic matter in units of the critical density at z=0. If this is set to None (the default), any computation that requires its value will raise an exception. name : str, optional Name for this cosmological object. Examples -------- >>> from astropy.cosmology import FlatLambdaCDM >>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3) The comoving distance in Mpc at redshift z: >>> z = 0.5 >>> dc = cosmo.comoving_distance(z) """ def __init__(self, H0, Om0, Tcmb0=0, Neff=3.04, m_nu=u.Quantity(0.0, u.eV), Ob0=None, name=None): LambdaCDM.__init__(self, H0, Om0, 0.0, Tcmb0, Neff, m_nu, name=name, Ob0=Ob0) # Do some twiddling after the fact to get flatness self._Ode0 = 1.0 - self._Om0 - self._Ogamma0 - self._Onu0 self._Ok0 = 0.0 # Please see "Notes about speeding up integrals" for discussion # about what is being done here. if self._Tcmb0.value == 0: self._inv_efunc_scalar = scalar_inv_efuncs.flcdm_inv_efunc_norel self._inv_efunc_scalar_args = (self._Om0, self._Ode0) # Call out Om0=1 (Einstein-de Sitter) and Om0=0 (de Sitter) cases. # The dS case is required because the hypergeometric case # for Omega_M=0 would lead to an infinity in its argument. # The EdS case is three times faster than the hypergeometric. if self._Om0 == 0: self._comoving_distance_z1z2 = \ self._dS_comoving_distance_z1z2 self._age = self._dS_age self._lookback_time = self._dS_lookback_time elif self._Om0 == 1: self._comoving_distance_z1z2 = \ self._EdS_comoving_distance_z1z2 self._age = self._EdS_age self._lookback_time = self._EdS_lookback_time else: self._comoving_distance_z1z2 = \ self._hypergeometric_comoving_distance_z1z2 self._age = self._analytic_age self._lookback_time = self._analytic_lookback_time elif not self._massivenu: self._inv_efunc_scalar = scalar_inv_efuncs.flcdm_inv_efunc_nomnu self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ogamma0 + self._Onu0) else: self._inv_efunc_scalar = scalar_inv_efuncs.flcdm_inv_efunc self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ogamma0, self._neff_per_nu, self._nmasslessnu, self._nu_y_list) def _dS_comoving_distance_z1z2(self, z1, z2): """ Comoving line-of-sight distance in Mpc between objects at redshifts z1 and z2 in a flat, Omega_Lambda=1 cosmology (de Sitter). The comoving distance along the line-of-sight between two objects remains constant with time for objects in the Hubble flow. The de Sitter case has an analytic solution. Parameters ---------- z1, z2 : array-like, shape (N,) Input redshifts. Must be 1D or scalar. 
        Returns
        -------
        d : `~astropy.units.Quantity`
          Comoving distance in Mpc between each input redshift.
        """
        if isiterable(z1):
            z1 = np.asarray(z1)
            z2 = np.asarray(z2)
            if z1.shape != z2.shape:
                msg = "z1 and z2 have different shapes"
                raise ValueError(msg)

        return self._hubble_distance * (z2 - z1)

    def _EdS_comoving_distance_z1z2(self, z1, z2):
        """ Comoving line-of-sight distance in Mpc between objects at
        redshifts z1 and z2 in a flat, Omega_M=1 cosmology
        (Einstein-de Sitter).

        The comoving distance along the line-of-sight between two
        objects remains constant with time for objects in the Hubble
        flow.

        For OM=1, Omega_rad=0 the comoving distance has an analytic solution.

        Parameters
        ----------
        z1, z2 : array-like, shape (N,)
          Input redshifts.  Must be 1D or scalar.

        Returns
        -------
        d : `~astropy.units.Quantity`
          Comoving distance in Mpc between each input redshift.
        """
        if isiterable(z1):
            z1 = np.asarray(z1)
            z2 = np.asarray(z2)
            if z1.shape != z2.shape:
                msg = "z1 and z2 have different shapes"
                raise ValueError(msg)

        prefactor = 2 * self._hubble_distance
        return prefactor * ((1+z1)**(-1./2) - (1+z2)**(-1./2))

    def _hypergeometric_comoving_distance_z1z2(self, z1, z2):
        """ Comoving line-of-sight distance in Mpc between objects at
        redshifts z1 and z2.

        The comoving distance along the line-of-sight between two
        objects remains constant with time for objects in the Hubble
        flow.

        For Omega_radiation = 0 the comoving distance can be directly
        calculated as a hypergeometric function.  The equation here is
        taken from Baes, Camps, Van De Putte, 2017, MNRAS, 468, 927.

        Parameters
        ----------
        z1, z2 : array-like
          Input redshifts.

        Returns
        -------
        d : `~astropy.units.Quantity`
          Comoving distance in Mpc between each input redshift.
        """
        if isiterable(z1):
            z1 = np.asarray(z1)
            z2 = np.asarray(z2)
            if z1.shape != z2.shape:
                msg = "z1 and z2 have different shapes"
                raise ValueError(msg)

        s = ((1 - self._Om0) / self._Om0) ** (1./3)
        # Use np.sqrt here to handle negative s (Om0 > 1).
        prefactor = self._hubble_distance / np.sqrt(s * self._Om0)
        return prefactor * (self._T_hypergeometric(s / (1 + z1)) -
                            self._T_hypergeometric(s / (1 + z2)))

    def _T_hypergeometric(self, x):
        """ Compute T_hypergeometric(x) using the Gauss hypergeometric
        function 2F1:

        T(x) = 2 \\sqrt{x} {}_{2}F_{1}\\left(\\frac{1}{6}, \\frac{1}{2};
                                             \\frac{7}{6}; -x^3\\right)

        Note: scipy.special.hyp2f1 already implements the hypergeometric
        transformation suggested by Baes, Camps, Van De Putte, 2017,
        MNRAS, 468, 927, for use in actual numerical evaluations.
        """
        from scipy.special import hyp2f1
        return 2 * np.sqrt(x) * hyp2f1(1./6, 1./2, 7./6, -x**3)

    def _dS_age(self, z):
        """ Age of the universe in Gyr at redshift ``z``.

        The age of a de Sitter Universe is infinite.

        Parameters
        ----------
        z : array-like
          Input redshifts.

        Returns
        -------
        t : `~astropy.units.Quantity`
          The age of the universe in Gyr at each input redshift.
        """
        return self._hubble_time * inf_like(z)

    def _EdS_age(self, z):
        """ Age of the universe in Gyr at redshift ``z``.

        For Omega_radiation = 0 (T_CMB = 0; massless neutrinos) the age
        can be directly calculated as an elliptic integral.
        See, e.g., Thomas and Kantowski, arXiv:0003463

        Parameters
        ----------
        z : array-like
          Input redshifts.

        Returns
        -------
        t : `~astropy.units.Quantity`
          The age of the universe in Gyr at each input redshift.
        """
        if isiterable(z):
            z = np.asarray(z)

        return (2./3) * self._hubble_time * (1+z)**(-3./2)

    def _analytic_age(self, z):
        """ Age of the universe in Gyr at redshift ``z``.
For Omega_radiation = 0 (T_CMB = 0; massless neutrinos) the age can be directly calculated as an elliptic integral. See, e.g., Thomas and Kantowski, arXiv:0003463 Parameters ---------- z : array-like Input redshifts. Returns ------- t : `~astropy.units.Quantity` The age of the universe in Gyr at each input redshift. """ if isiterable(z): z = np.asarray(z) # Use np.sqrt, np.arcsinh instead of math.sqrt, math.asinh # to handle properly the complex numbers for 1 - Om0 < 0 prefactor = (2./3) * self._hubble_time / \ np.lib.scimath.sqrt(1 - self._Om0) arg = np.arcsinh(np.lib.scimath.sqrt((1 / self._Om0 - 1 + 0j) / (1 + z)**3)) return (prefactor * arg).real def _EdS_lookback_time(self, z): """ Lookback time in Gyr to redshift ``z``. The lookback time is the difference between the age of the Universe now and the age at redshift ``z``. For Omega_radiation = 0 (T_CMB = 0; massless neutrinos) the age can be directly calculated as an elliptic integral. The lookback time is here calculated based on the age(0) - age(z) Parameters ---------- z : array-like Input redshifts. Must be 1D or scalar Returns ------- t : `~astropy.units.Quantity` Lookback time in Gyr to each input redshift. """ return self._EdS_age(0) - self._EdS_age(z) def _dS_lookback_time(self, z): """ Lookback time in Gyr to redshift ``z``. The lookback time is the difference between the age of the Universe now and the age at redshift ``z``. For Omega_radiation = 0 (T_CMB = 0; massless neutrinos) the age can be directly calculated. a = exp(H * t) where t=0 at z=0 t = (1/H) (ln 1 - ln a) = (1/H) (0 - ln (1/(1+z))) = (1/H) ln(1+z) Parameters ---------- z : array-like Input redshifts. Returns ------- t : `~astropy.units.Quantity` Lookback time in Gyr to each input redshift. """ if isiterable(z): z = np.asarray(z) return self._hubble_time * np.log(1+z) def _analytic_lookback_time(self, z): """ Lookback time in Gyr to redshift ``z``. The lookback time is the difference between the age of the Universe now and the age at redshift ``z``. For Omega_radiation = 0 (T_CMB = 0; massless neutrinos) the age can be directly calculated. The lookback time is here calculated based on the age(0) - age(z) Parameters ---------- z : array-like Input redshifts. Must be 1D or scalar Returns ------- t : `~astropy.units.Quantity` Lookback time in Gyr to each input redshift. """ return self._analytic_age(0) - self._analytic_age(z) def efunc(self, z): """ Function used to calculate H(z), the Hubble parameter. Parameters ---------- z : array-like Input redshifts. Returns ------- E : ndarray, or float if input scalar The redshift scaling of the Hubble constant. Notes ----- The return value, E, is defined such that :math:`H(z) = H_0 E`. """ if isiterable(z): z = np.asarray(z) # We override this because it takes a particularly simple # form for a cosmological constant Om0, Ode0 = self._Om0, self._Ode0 if self._massivenu: Or = self._Ogamma0 * (1 + self.nu_relative_density(z)) else: Or = self._Ogamma0 + self._Onu0 zp1 = 1.0 + z return np.sqrt(zp1 ** 3 * (Or * zp1 + Om0) + Ode0) def inv_efunc(self, z): r"""Function used to calculate :math:`\frac{1}{H_z}`. Parameters ---------- z : array-like Input redshifts. Returns ------- E : ndarray, or float if input scalar The inverse redshift scaling of the Hubble constant. Notes ----- The return value, E, is defined such that :math:`H_z = H_0 / E`. """ if isiterable(z): z = np.asarray(z) Om0, Ode0 = self._Om0, self._Ode0 if self._massivenu: Or = self._Ogamma0 * (1. 
+ self.nu_relative_density(z)) else: Or = self._Ogamma0 + self._Onu0 zp1 = 1.0 + z return (zp1 ** 3 * (Or * zp1 + Om0) + Ode0)**(-0.5) def __repr__(self): retstr = "{0}H0={1:.3g}, Om0={2:.3g}, Tcmb0={3:.4g}, "\ "Neff={4:.3g}, m_nu={5}, Ob0={6:s})" return retstr.format(self._namelead(), self._H0, self._Om0, self._Tcmb0, self._Neff, self.m_nu, _float_or_none(self._Ob0)) class wCDM(FLRW): """FLRW cosmology with a constant dark energy equation of state and curvature. This has one additional attribute beyond those of FLRW. Parameters ---------- H0 : float or `~astropy.units.Quantity` Hubble constant at z = 0. If a float, must be in [km/sec/Mpc] Om0 : float Omega matter: density of non-relativistic matter in units of the critical density at z=0. Ode0 : float Omega dark energy: density of dark energy in units of the critical density at z=0. w0 : float, optional Dark energy equation of state at all redshifts. This is pressure/density for dark energy in units where c=1. A cosmological constant has w0=-1.0. Tcmb0 : float or scalar `~astropy.units.Quantity`, optional Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K]. Setting this to zero will turn off both photons and neutrinos (even massive ones). Neff : float, optional Effective number of Neutrino species. Default 3.04. m_nu : `~astropy.units.Quantity`, optional Mass of each neutrino species. If this is a scalar Quantity, then all neutrino species are assumed to have that mass. Otherwise, the mass of each species. The actual number of neutrino species (and hence the number of elements of m_nu if it is not scalar) must be the floor of Neff. Typically this means you should provide three neutrino masses unless you are considering something like a sterile neutrino. Ob0 : float or None, optional Omega baryons: density of baryonic matter in units of the critical density at z=0. If this is set to None (the default), any computation that requires its value will raise an exception. name : str, optional Name for this cosmological object. Examples -------- >>> from astropy.cosmology import wCDM >>> cosmo = wCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9) The comoving distance in Mpc at redshift z: >>> z = 0.5 >>> dc = cosmo.comoving_distance(z) """ def __init__(self, H0, Om0, Ode0, w0=-1., Tcmb0=0, Neff=3.04, m_nu=u.Quantity(0.0, u.eV), Ob0=None, name=None): FLRW.__init__(self, H0, Om0, Ode0, Tcmb0, Neff, m_nu, name=name, Ob0=Ob0) self._w0 = float(w0) # Please see "Notes about speeding up integrals" for discussion # about what is being done here. if self._Tcmb0.value == 0: self._inv_efunc_scalar = scalar_inv_efuncs.wcdm_inv_efunc_norel self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, self._w0) elif not self._massivenu: self._inv_efunc_scalar = scalar_inv_efuncs.wcdm_inv_efunc_nomnu self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, self._Ogamma0 + self._Onu0, self._w0) else: self._inv_efunc_scalar = scalar_inv_efuncs.wcdm_inv_efunc self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, self._Ogamma0, self._neff_per_nu, self._nmasslessnu, self._nu_y_list, self._w0) @property def w0(self): """ Dark energy equation of state""" return self._w0 def w(self, z): """Returns dark energy equation of state at redshift ``z``. Parameters ---------- z : array-like Input redshifts. 
Returns ------- w : ndarray, or float if input scalar The dark energy equation of state Notes ------ The dark energy equation of state is defined as :math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the pressure at redshift z and :math:`\\rho(z)` is the density at redshift z, both in units where c=1. Here this is :math:`w(z) = w_0`. """ if np.isscalar(z): return self._w0 else: return self._w0 * np.ones(np.asanyarray(z).shape) def de_density_scale(self, z): """ Evaluates the redshift dependence of the dark energy density. Parameters ---------- z : array-like Input redshifts. Returns ------- I : ndarray, or float if input scalar The scaling of the energy density of dark energy with redshift. Notes ----- The scaling factor, I, is defined by :math:`\\rho(z) = \\rho_0 I`, and in this case is given by :math:`I = \\left(1 + z\\right)^{3\\left(1 + w_0\\right)}` """ if isiterable(z): z = np.asarray(z) return (1. + z) ** (3. * (1. + self._w0)) def efunc(self, z): """ Function used to calculate H(z), the Hubble parameter. Parameters ---------- z : array-like Input redshifts. Returns ------- E : ndarray, or float if input scalar The redshift scaling of the Hubble constant. Notes ----- The return value, E, is defined such that :math:`H(z) = H_0 E`. """ if isiterable(z): z = np.asarray(z) Om0, Ode0, Ok0, w0 = self._Om0, self._Ode0, self._Ok0, self._w0 if self._massivenu: Or = self._Ogamma0 * (1. + self.nu_relative_density(z)) else: Or = self._Ogamma0 + self._Onu0 zp1 = 1.0 + z return np.sqrt(zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) + Ode0 * zp1 ** (3. * (1. + w0))) def inv_efunc(self, z): r""" Function used to calculate :math:`\frac{1}{H_z}`. Parameters ---------- z : array-like Input redshifts. Returns ------- E : ndarray, or float if input scalar The inverse redshift scaling of the Hubble constant. Notes ----- The return value, E, is defined such that :math:`H_z = H_0 / E`. """ if isiterable(z): z = np.asarray(z) Om0, Ode0, Ok0, w0 = self._Om0, self._Ode0, self._Ok0, self._w0 if self._massivenu: Or = self._Ogamma0 * (1. + self.nu_relative_density(z)) else: Or = self._Ogamma0 + self._Onu0 zp1 = 1.0 + z return (zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) + Ode0 * zp1 ** (3. * (1. + w0)))**(-0.5) def __repr__(self): retstr = "{0}H0={1:.3g}, Om0={2:.3g}, Ode0={3:.3g}, w0={4:.3g}, "\ "Tcmb0={5:.4g}, Neff={6:.3g}, m_nu={7}, Ob0={8:s})" return retstr.format(self._namelead(), self._H0, self._Om0, self._Ode0, self._w0, self._Tcmb0, self._Neff, self.m_nu, _float_or_none(self._Ob0)) class FlatwCDM(wCDM): """FLRW cosmology with a constant dark energy equation of state and no spatial curvature. This has one additional attribute beyond those of FLRW. Parameters ---------- H0 : float or `~astropy.units.Quantity` Hubble constant at z = 0. If a float, must be in [km/sec/Mpc] Om0 : float Omega matter: density of non-relativistic matter in units of the critical density at z=0. w0 : float, optional Dark energy equation of state at all redshifts. This is pressure/density for dark energy in units where c=1. A cosmological constant has w0=-1.0. Tcmb0 : float or scalar `~astropy.units.Quantity`, optional Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K]. Setting this to zero will turn off both photons and neutrinos (even massive ones). Neff : float, optional Effective number of Neutrino species. Default 3.04. m_nu : `~astropy.units.Quantity`, optional Mass of each neutrino species. If this is a scalar Quantity, then all neutrino species are assumed to have that mass. Otherwise, the mass of each species. 
The actual number of neutrino species (and hence the number of elements of m_nu if it is not scalar) must be the floor of Neff. Typically this means you should provide three neutrino masses unless you are considering something like a sterile neutrino. Ob0 : float or None, optional Omega baryons: density of baryonic matter in units of the critical density at z=0. If this is set to None (the default), any computation that requires its value will raise an exception. name : str, optional Name for this cosmological object. Examples -------- >>> from astropy.cosmology import FlatwCDM >>> cosmo = FlatwCDM(H0=70, Om0=0.3, w0=-0.9) The comoving distance in Mpc at redshift z: >>> z = 0.5 >>> dc = cosmo.comoving_distance(z) """ def __init__(self, H0, Om0, w0=-1., Tcmb0=0, Neff=3.04, m_nu=u.Quantity(0.0, u.eV), Ob0=None, name=None): wCDM.__init__(self, H0, Om0, 0.0, w0, Tcmb0, Neff, m_nu, name=name, Ob0=Ob0) # Do some twiddling after the fact to get flatness self._Ode0 = 1.0 - self._Om0 - self._Ogamma0 - self._Onu0 self._Ok0 = 0.0 # Please see "Notes about speeding up integrals" for discussion # about what is being done here. if self._Tcmb0.value == 0: self._inv_efunc_scalar = scalar_inv_efuncs.fwcdm_inv_efunc_norel self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._w0) elif not self._massivenu: self._inv_efunc_scalar = scalar_inv_efuncs.fwcdm_inv_efunc_nomnu self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ogamma0 + self._Onu0, self._w0) else: self._inv_efunc_scalar = scalar_inv_efuncs.fwcdm_inv_efunc self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ogamma0, self._neff_per_nu, self._nmasslessnu, self._nu_y_list, self._w0) def efunc(self, z): """ Function used to calculate H(z), the Hubble parameter. Parameters ---------- z : array-like Input redshifts. Returns ------- E : ndarray, or float if input scalar The redshift scaling of the Hubble constant. Notes ----- The return value, E, is defined such that :math:`H(z) = H_0 E`. """ if isiterable(z): z = np.asarray(z) Om0, Ode0, w0 = self._Om0, self._Ode0, self._w0 if self._massivenu: Or = self._Ogamma0 * (1. + self.nu_relative_density(z)) else: Or = self._Ogamma0 + self._Onu0 zp1 = 1. + z return np.sqrt(zp1 ** 3 * (Or * zp1 + Om0) + Ode0 * zp1 ** (3. * (1 + w0))) def inv_efunc(self, z): r""" Function used to calculate :math:`\frac{1}{H_z}`. Parameters ---------- z : array-like Input redshifts. Returns ------- E : ndarray, or float if input scalar The inverse redshift scaling of the Hubble constant. Notes ----- The return value, E, is defined such that :math:`H_z = H_0 / E`. """ if isiterable(z): z = np.asarray(z) Om0, Ode0, w0 = self._Om0, self._Ode0, self._w0 if self._massivenu: Or = self._Ogamma0 * (1. + self.nu_relative_density(z)) else: Or = self._Ogamma0 + self._Onu0 zp1 = 1. + z return (zp1 ** 3 * (Or * zp1 + Om0) + Ode0 * zp1 ** (3. * (1. + w0)))**(-0.5) def __repr__(self): retstr = "{0}H0={1:.3g}, Om0={2:.3g}, w0={3:.3g}, Tcmb0={4:.4g}, "\ "Neff={5:.3g}, m_nu={6}, Ob0={7:s})" return retstr.format(self._namelead(), self._H0, self._Om0, self._w0, self._Tcmb0, self._Neff, self.m_nu, _float_or_none(self._Ob0)) class w0waCDM(FLRW): """FLRW cosmology with a CPL dark energy equation of state and curvature. The equation for the dark energy equation of state uses the CPL form as described in Chevallier & Polarski Int. J. Mod. Phys. D10, 213 (2001) and Linder PRL 90, 91301 (2003): :math:`w(z) = w_0 + w_a (1-a) = w_0 + w_a z / (1+z)`. Parameters ---------- H0 : float or `~astropy.units.Quantity` Hubble constant at z = 0. 
If a float, must be in [km/sec/Mpc] Om0 : float Omega matter: density of non-relativistic matter in units of the critical density at z=0. Ode0 : float Omega dark energy: density of dark energy in units of the critical density at z=0. w0 : float, optional Dark energy equation of state at z=0 (a=1). This is pressure/density for dark energy in units where c=1. wa : float, optional Negative derivative of the dark energy equation of state with respect to the scale factor. A cosmological constant has w0=-1.0 and wa=0.0. Tcmb0 : float or scalar `~astropy.units.Quantity`, optional Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K]. Setting this to zero will turn off both photons and neutrinos (even massive ones). Neff : float, optional Effective number of Neutrino species. Default 3.04. m_nu : `~astropy.units.Quantity`, optional Mass of each neutrino species. If this is a scalar Quantity, then all neutrino species are assumed to have that mass. Otherwise, the mass of each species. The actual number of neutrino species (and hence the number of elements of m_nu if it is not scalar) must be the floor of Neff. Typically this means you should provide three neutrino masses unless you are considering something like a sterile neutrino. Ob0 : float or None, optional Omega baryons: density of baryonic matter in units of the critical density at z=0. If this is set to None (the default), any computation that requires its value will raise an exception. name : str, optional Name for this cosmological object. Examples -------- >>> from astropy.cosmology import w0waCDM >>> cosmo = w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.2) The comoving distance in Mpc at redshift z: >>> z = 0.5 >>> dc = cosmo.comoving_distance(z) """ def __init__(self, H0, Om0, Ode0, w0=-1., wa=0., Tcmb0=0, Neff=3.04, m_nu=u.Quantity(0.0, u.eV), Ob0=None, name=None): FLRW.__init__(self, H0, Om0, Ode0, Tcmb0, Neff, m_nu, name=name, Ob0=Ob0) self._w0 = float(w0) self._wa = float(wa) # Please see "Notes about speeding up integrals" for discussion # about what is being done here. if self._Tcmb0.value == 0: self._inv_efunc_scalar = scalar_inv_efuncs.w0wacdm_inv_efunc_norel self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, self._w0, self._wa) elif not self._massivenu: self._inv_efunc_scalar = scalar_inv_efuncs.w0wacdm_inv_efunc_nomnu self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, self._Ogamma0 + self._Onu0, self._w0, self._wa) else: self._inv_efunc_scalar = scalar_inv_efuncs.w0wacdm_inv_efunc self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, self._Ogamma0, self._neff_per_nu, self._nmasslessnu, self._nu_y_list, self._w0, self._wa) @property def w0(self): """ Dark energy equation of state at z=0""" return self._w0 @property def wa(self): """ Negative derivative of dark energy equation of state w.r.t. a""" return self._wa def w(self, z): """Returns dark energy equation of state at redshift ``z``. Parameters ---------- z : array-like Input redshifts. Returns ------- w : ndarray, or float if input scalar The dark energy equation of state Notes ------ The dark energy equation of state is defined as :math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the pressure at redshift z and :math:`\\rho(z)` is the density at redshift z, both in units where c=1. Here this is :math:`w(z) = w_0 + w_a (1 - a) = w_0 + w_a \\frac{z}{1+z}`. 
""" if isiterable(z): z = np.asarray(z) return self._w0 + self._wa * z / (1.0 + z) def de_density_scale(self, z): r""" Evaluates the redshift dependence of the dark energy density. Parameters ---------- z : array-like Input redshifts. Returns ------- I : ndarray, or float if input scalar The scaling of the energy density of dark energy with redshift. Notes ----- The scaling factor, I, is defined by :math:`\\rho(z) = \\rho_0 I`, and in this case is given by .. math:: I = \left(1 + z\right)^{3 \left(1 + w_0 + w_a\right)} \exp \left(-3 w_a \frac{z}{1+z}\right) """ if isiterable(z): z = np.asarray(z) zp1 = 1.0 + z return zp1 ** (3 * (1 + self._w0 + self._wa)) * \ np.exp(-3 * self._wa * z / zp1) def __repr__(self): retstr = "{0}H0={1:.3g}, Om0={2:.3g}, "\ "Ode0={3:.3g}, w0={4:.3g}, wa={5:.3g}, Tcmb0={6:.4g}, "\ "Neff={7:.3g}, m_nu={8}, Ob0={9:s})" return retstr.format(self._namelead(), self._H0, self._Om0, self._Ode0, self._w0, self._wa, self._Tcmb0, self._Neff, self.m_nu, _float_or_none(self._Ob0)) class Flatw0waCDM(w0waCDM): """FLRW cosmology with a CPL dark energy equation of state and no curvature. The equation for the dark energy equation of state uses the CPL form as described in Chevallier & Polarski Int. J. Mod. Phys. D10, 213 (2001) and Linder PRL 90, 91301 (2003): :math:`w(z) = w_0 + w_a (1-a) = w_0 + w_a z / (1+z)`. Parameters ---------- H0 : float or `~astropy.units.Quantity` Hubble constant at z = 0. If a float, must be in [km/sec/Mpc] Om0 : float Omega matter: density of non-relativistic matter in units of the critical density at z=0. w0 : float, optional Dark energy equation of state at z=0 (a=1). This is pressure/density for dark energy in units where c=1. wa : float, optional Negative derivative of the dark energy equation of state with respect to the scale factor. A cosmological constant has w0=-1.0 and wa=0.0. Tcmb0 : float or scalar `~astropy.units.Quantity`, optional Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K]. Setting this to zero will turn off both photons and neutrinos (even massive ones). Neff : float, optional Effective number of Neutrino species. Default 3.04. m_nu : `~astropy.units.Quantity`, optional Mass of each neutrino species. If this is a scalar Quantity, then all neutrino species are assumed to have that mass. Otherwise, the mass of each species. The actual number of neutrino species (and hence the number of elements of m_nu if it is not scalar) must be the floor of Neff. Typically this means you should provide three neutrino masses unless you are considering something like a sterile neutrino. Ob0 : float or None, optional Omega baryons: density of baryonic matter in units of the critical density at z=0. If this is set to None (the default), any computation that requires its value will raise an exception. name : str, optional Name for this cosmological object. Examples -------- >>> from astropy.cosmology import Flatw0waCDM >>> cosmo = Flatw0waCDM(H0=70, Om0=0.3, w0=-0.9, wa=0.2) The comoving distance in Mpc at redshift z: >>> z = 0.5 >>> dc = cosmo.comoving_distance(z) """ def __init__(self, H0, Om0, w0=-1., wa=0., Tcmb0=0, Neff=3.04, m_nu=u.Quantity(0.0, u.eV), Ob0=None, name=None): w0waCDM.__init__(self, H0, Om0, 0.0, w0=w0, wa=wa, Tcmb0=Tcmb0, Neff=Neff, m_nu=m_nu, name=name, Ob0=Ob0) # Do some twiddling after the fact to get flatness self._Ode0 = 1.0 - self._Om0 - self._Ogamma0 - self._Onu0 self._Ok0 = 0.0 # Please see "Notes about speeding up integrals" for discussion # about what is being done here. 
        if self._Tcmb0.value == 0:
            self._inv_efunc_scalar = scalar_inv_efuncs.fw0wacdm_inv_efunc_norel
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
                                           self._w0, self._wa)
        elif not self._massivenu:
            self._inv_efunc_scalar = scalar_inv_efuncs.fw0wacdm_inv_efunc_nomnu
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
                                           self._Ogamma0 + self._Onu0,
                                           self._w0, self._wa)
        else:
            self._inv_efunc_scalar = scalar_inv_efuncs.fw0wacdm_inv_efunc
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
                                           self._Ogamma0, self._neff_per_nu,
                                           self._nmasslessnu,
                                           self._nu_y_list, self._w0,
                                           self._wa)

    def __repr__(self):
        retstr = "{0}H0={1:.3g}, Om0={2:.3g}, w0={3:.3g}, wa={4:.3g}, "\
                 "Tcmb0={5:.4g}, Neff={6:.3g}, m_nu={7}, Ob0={8:s})"
        return retstr.format(self._namelead(), self._H0, self._Om0,
                             self._w0, self._wa, self._Tcmb0, self._Neff,
                             self.m_nu, _float_or_none(self._Ob0))


class wpwaCDM(FLRW):
    """FLRW cosmology with a CPL dark energy equation of state, a pivot
    redshift, and curvature.

    The equation for the dark energy equation of state uses the CPL form as
    described in Chevallier & Polarski Int. J. Mod. Phys. D10, 213 (2001)
    and Linder PRL 90, 91301 (2003), but modified to have a pivot redshift
    as in the findings of the Dark Energy Task Force
    (Albrecht et al. arXiv:0901.0721 (2009)):
    :math:`w(a) = w_p + w_a (a_p - a) = w_p + w_a (1/(1+z_p) - 1/(1+z))`.

    Parameters
    ----------
    H0 : float or `~astropy.units.Quantity`
        Hubble constant at z = 0.  If a float, must be in [km/sec/Mpc]

    Om0 : float
        Omega matter: density of non-relativistic matter in units of the
        critical density at z=0.

    Ode0 : float
        Omega dark energy: density of dark energy in units of the critical
        density at z=0.

    wp : float, optional
        Dark energy equation of state at the pivot redshift zp.  This is
        pressure/density for dark energy in units where c=1.

    wa : float, optional
        Negative derivative of the dark energy equation of state with
        respect to the scale factor.  A cosmological constant has
        wp=-1.0 and wa=0.0.

    zp : float, optional
        Pivot redshift -- the redshift where w(z) = wp

    Tcmb0 : float or scalar `~astropy.units.Quantity`, optional
        Temperature of the CMB z=0. If a float, must be in [K].
        Default: 0 [K]. Setting this to zero will turn off both photons
        and neutrinos (even massive ones).

    Neff : float, optional
        Effective number of Neutrino species. Default 3.04.

    m_nu : `~astropy.units.Quantity`, optional
        Mass of each neutrino species. If this is a scalar Quantity, then all
        neutrino species are assumed to have that mass. Otherwise, the mass of
        each species. The actual number of neutrino species (and hence the
        number of elements of m_nu if it is not scalar) must be the floor of
        Neff. Typically this means you should provide three neutrino masses
        unless you are considering something like a sterile neutrino.

    Ob0 : float or None, optional
        Omega baryons: density of baryonic matter in units of the critical
        density at z=0.  If this is set to None (the default), any
        computation that requires its value will raise an exception.

    name : str, optional
        Name for this cosmological object.
    Examples
    --------
    >>> from astropy.cosmology import wpwaCDM
    >>> cosmo = wpwaCDM(H0=70, Om0=0.3, Ode0=0.7, wp=-0.9, wa=0.2, zp=0.4)

    The comoving distance in Mpc at redshift z:

    >>> z = 0.5
    >>> dc = cosmo.comoving_distance(z)
    """

    def __init__(self, H0, Om0, Ode0, wp=-1., wa=0., zp=0,
                 Tcmb0=0, Neff=3.04, m_nu=u.Quantity(0.0, u.eV),
                 Ob0=None, name=None):

        FLRW.__init__(self, H0, Om0, Ode0, Tcmb0, Neff, m_nu, name=name,
                      Ob0=Ob0)
        self._wp = float(wp)
        self._wa = float(wa)
        self._zp = float(zp)

        # Please see "Notes about speeding up integrals" for discussion
        # about what is being done here.
        apiv = 1.0 / (1.0 + self._zp)
        if self._Tcmb0.value == 0:
            self._inv_efunc_scalar = scalar_inv_efuncs.wpwacdm_inv_efunc_norel
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
                                           self._wp, apiv, self._wa)
        elif not self._massivenu:
            self._inv_efunc_scalar = scalar_inv_efuncs.wpwacdm_inv_efunc_nomnu
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
                                           self._Ogamma0 + self._Onu0,
                                           self._wp, apiv, self._wa)
        else:
            self._inv_efunc_scalar = scalar_inv_efuncs.wpwacdm_inv_efunc
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
                                           self._Ogamma0, self._neff_per_nu,
                                           self._nmasslessnu,
                                           self._nu_y_list, self._wp,
                                           apiv, self._wa)

    @property
    def wp(self):
        """ Dark energy equation of state at the pivot redshift zp"""
        return self._wp

    @property
    def wa(self):
        """ Negative derivative of dark energy equation of state w.r.t. a"""
        return self._wa

    @property
    def zp(self):
        """ The pivot redshift, where w(z) = wp"""
        return self._zp

    def w(self, z):
        """Returns dark energy equation of state at redshift ``z``.

        Parameters
        ----------
        z : array-like
          Input redshifts.

        Returns
        -------
        w : ndarray, or float if input scalar
          The dark energy equation of state

        Notes
        -----
        The dark energy equation of state is defined as
        :math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the pressure at
        redshift z and :math:`\\rho(z)` is the density at redshift z, both in
        units where c=1.  Here this is
        :math:`w(z) = w_p + w_a (a_p - a)` where :math:`a = 1/(1+z)` and
        :math:`a_p = 1/(1 + z_p)`.
        """
        if isiterable(z):
            z = np.asarray(z)

        apiv = 1.0 / (1.0 + self._zp)
        return self._wp + self._wa * (apiv - 1.0 / (1. + z))

    def de_density_scale(self, z):
        r""" Evaluates the redshift dependence of the dark energy density.

        Parameters
        ----------
        z : array-like
          Input redshifts.

        Returns
        -------
        I : ndarray, or float if input scalar
          The scaling of the energy density of dark energy with redshift.

        Notes
        -----
        The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
        and in this case is given by

        .. math::

          a_p = \frac{1}{1 + z_p}

          I = \left(1 + z\right)^{3 \left(1 + w_p + a_p w_a\right)}
              \exp \left(-3 w_a \frac{z}{1+z}\right)
        """
        if isiterable(z):
            z = np.asarray(z)
        zp1 = 1. + z
        apiv = 1. / (1. + self._zp)
        return zp1 ** (3. * (1. + self._wp + apiv * self._wa)) * \
            np.exp(-3. * self._wa * z / zp1)

    def __repr__(self):
        retstr = "{0}H0={1:.3g}, Om0={2:.3g}, Ode0={3:.3g}, wp={4:.3g}, "\
                 "wa={5:.3g}, zp={6:.3g}, Tcmb0={7:.4g}, Neff={8:.3g}, "\
                 "m_nu={9}, Ob0={10:s})"
        return retstr.format(self._namelead(), self._H0, self._Om0,
                             self._Ode0, self._wp, self._wa, self._zp,
                             self._Tcmb0, self._Neff, self.m_nu,
                             _float_or_none(self._Ob0))


class w0wzCDM(FLRW):
    """FLRW cosmology with a variable dark energy equation of state and
    curvature.

    The equation for the dark energy equation of state uses the simple form:
    :math:`w(z) = w_0 + w_z z`.

    This form is not recommended for z > 1.

    Parameters
    ----------
    H0 : float or `~astropy.units.Quantity`
        Hubble constant at z = 0.
If a float, must be in [km/sec/Mpc] Om0 : float Omega matter: density of non-relativistic matter in units of the critical density at z=0. Ode0 : float Omega dark energy: density of dark energy in units of the critical density at z=0. w0 : float, optional Dark energy equation of state at z=0. This is pressure/density for dark energy in units where c=1. wz : float, optional Derivative of the dark energy equation of state with respect to z. A cosmological constant has w0=-1.0 and wz=0.0. Tcmb0 : float or scalar `~astropy.units.Quantity`, optional Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K]. Setting this to zero will turn off both photons and neutrinos (even massive ones). Neff : float, optional Effective number of Neutrino species. Default 3.04. m_nu : `~astropy.units.Quantity`, optional Mass of each neutrino species. If this is a scalar Quantity, then all neutrino species are assumed to have that mass. Otherwise, the mass of each species. The actual number of neutrino species (and hence the number of elements of m_nu if it is not scalar) must be the floor of Neff. Typically this means you should provide three neutrino masses unless you are considering something like a sterile neutrino. Ob0 : float or None, optional Omega baryons: density of baryonic matter in units of the critical density at z=0. If this is set to None (the default), any computation that requires its value will raise an exception. name : str, optional Name for this cosmological object. Examples -------- >>> from astropy.cosmology import w0wzCDM >>> cosmo = w0wzCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wz=0.2) The comoving distance in Mpc at redshift z: >>> z = 0.5 >>> dc = cosmo.comoving_distance(z) """ def __init__(self, H0, Om0, Ode0, w0=-1., wz=0., Tcmb0=0, Neff=3.04, m_nu=u.Quantity(0.0, u.eV), Ob0=None, name=None): FLRW.__init__(self, H0, Om0, Ode0, Tcmb0, Neff, m_nu, name=name, Ob0=Ob0) self._w0 = float(w0) self._wz = float(wz) # Please see "Notes about speeding up integrals" for discussion # about what is being done here. if self._Tcmb0.value == 0: self._inv_efunc_scalar = scalar_inv_efuncs.w0wzcdm_inv_efunc_norel self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, self._w0, self._wz) elif not self._massivenu: self._inv_efunc_scalar = scalar_inv_efuncs.w0wzcdm_inv_efunc_nomnu self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, self._Ogamma0 + self._Onu0, self._w0, self._wz) else: self._inv_efunc_scalar = scalar_inv_efuncs.w0wzcdm_inv_efunc self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, self._Ogamma0, self._neff_per_nu, self._nmasslessnu, self._nu_y_list, self._w0, self._wz) @property def w0(self): """ Dark energy equation of state at z=0""" return self._w0 @property def wz(self): """ Derivative of the dark energy equation of state w.r.t. z""" return self._wz def w(self, z): """Returns dark energy equation of state at redshift ``z``. Parameters ---------- z : array-like Input redshifts. Returns ------- w : ndarray, or float if input scalar The dark energy equation of state Notes ------ The dark energy equation of state is defined as :math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the pressure at redshift z and :math:`\\rho(z)` is the density at redshift z, both in units where c=1. Here this is given by :math:`w(z) = w_0 + w_z z`. """ if isiterable(z): z = np.asarray(z) return self._w0 + self._wz * z def de_density_scale(self, z): r""" Evaluates the redshift dependence of the dark energy density. Parameters ---------- z : array-like Input redshifts. 
        Returns
        -------
        I : ndarray, or float if input scalar
          The scaling of the energy density of dark energy with redshift.

        Notes
        -----
        The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
        and in this case is given by

        .. math::

          I = \left(1 + z\right)^{3 \left(1 + w_0 - w_z\right)}
              \exp \left(-3 w_z z\right)
        """
        if isiterable(z):
            z = np.asarray(z)
        zp1 = 1. + z
        return zp1 ** (3. * (1. + self._w0 - self._wz)) *\
            np.exp(-3. * self._wz * z)

    def __repr__(self):
        retstr = "{0}H0={1:.3g}, Om0={2:.3g}, "\
                 "Ode0={3:.3g}, w0={4:.3g}, wz={5:.3g}, Tcmb0={6:.4g}, "\
                 "Neff={7:.3g}, m_nu={8}, Ob0={9:s})"
        return retstr.format(self._namelead(), self._H0, self._Om0,
                             self._Ode0, self._w0, self._wz, self._Tcmb0,
                             self._Neff, self.m_nu,
                             _float_or_none(self._Ob0))


def _float_or_none(x, digits=3):
    """ Helper function to format a variable that can be a float or None"""
    if x is None:
        return str(x)

    fmtstr = "{0:.{digits}g}"
    return fmtstr.format(x, digits=digits)


def vectorize_if_needed(func, *x):
    """ Helper function to vectorize functions on array inputs"""
    if any(map(isiterable, x)):
        return np.vectorize(func)(*x)
    else:
        return func(*x)


def inf_like(x):
    """Return the shape of x with value infinity and dtype='float'.

    Preserves 'shape' for both array and scalar inputs.
    But always returns a float array, even if x is of integer type.

    >>> inf_like(0.)  # float scalar
    inf
    >>> inf_like(1)  # integer scalar should give float output
    inf
    >>> inf_like([0., 1., 2., 3.])  # float list
    array([inf, inf, inf, inf])
    >>> inf_like([0, 1, 2, 3])  # integer list should give float output
    array([inf, inf, inf, inf])
    """
    if np.isscalar(x):
        return np.inf
    else:
        return np.full_like(x, np.inf, dtype='float')


# Pre-defined cosmologies. This loops over the parameter sets in the
# parameters module and creates a LambdaCDM or FlatLambdaCDM instance
# with the same name as the parameter set in the current module's namespace.
# Note this assumes all the cosmologies in parameters are LambdaCDM,
# which is true at least as of this writing.

for key in parameters.available:
    par = getattr(parameters, key)
    if par['flat']:
        cosmo = FlatLambdaCDM(par['H0'], par['Om0'], Tcmb0=par['Tcmb0'],
                              Neff=par['Neff'],
                              m_nu=u.Quantity(par['m_nu'], u.eV),
                              name=key,
                              Ob0=par['Ob0'])
        docstr = "{} instance of FlatLambdaCDM cosmology\n\n(from {})"
        cosmo.__doc__ = docstr.format(key, par['reference'])
    else:
        cosmo = LambdaCDM(par['H0'], par['Om0'], par['Ode0'],
                          Tcmb0=par['Tcmb0'], Neff=par['Neff'],
                          m_nu=u.Quantity(par['m_nu'], u.eV), name=key,
                          Ob0=par['Ob0'])
        docstr = "{} instance of LambdaCDM cosmology\n\n(from {})"
        cosmo.__doc__ = docstr.format(key, par['reference'])
    setattr(sys.modules[__name__], key, cosmo)

# don't leave these variables floating around in the namespace
del key, par, cosmo

#########################################################################
# The science state below contains the current cosmology.
#########################################################################


class default_cosmology(ScienceState):
    """
    The default cosmology to use.  To change it::

        >>> from astropy.cosmology import default_cosmology, WMAP7
        >>> with default_cosmology.set(WMAP7):
        ...     # WMAP7 cosmology in effect
        ...     pass

    Or, you may use a string::

        >>> with default_cosmology.set('WMAP7'):
        ...     # WMAP7 cosmology in effect
        ...     pass
    """
    _value = 'WMAP9'

    @staticmethod
    def get_cosmology_from_string(arg):
        """ Return a cosmology instance from a string.
        """
        if arg == 'no_default':
            cosmo = None
        else:
            try:
                cosmo = getattr(sys.modules[__name__], arg)
            except AttributeError:
                s = "Unknown cosmology '{}'. Valid cosmologies:\n{}".format(
                    arg, parameters.available)
                raise ValueError(s)
        return cosmo

    @classmethod
    def validate(cls, value):
        if value is None:
            value = 'Planck15'
        if isinstance(value, str):
            return cls.get_cosmology_from_string(value)
        elif isinstance(value, Cosmology):
            return value
        else:
            raise TypeError("default_cosmology must be a string or "
                            "Cosmology instance.")
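

# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of the module logic):
# how the ``default_cosmology`` science state defined above is meant to be
# used.  ``get`` and ``set`` are provided by the ``ScienceState`` base class.
if __name__ == '__main__':
    # The module-level default is returned by ``get``.
    print(default_cosmology.get())

    # ``set`` used as a context manager swaps the default only temporarily;
    # the previous value is restored on exit from the ``with`` block.
    with default_cosmology.set('WMAP7'):
        print(default_cosmology.get().H0)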
08874872cc35038346f2e60a1763fe703fe73a2724a6c16d5796436c5fd14038
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ astropy.cosmology contains classes and functions for cosmological distance measures and other cosmology-related calculations. See the `Astropy documentation <http://docs.astropy.org/en/latest/cosmology/index.html>`_ for more detailed usage examples and references. """ from .core import * from .funcs import *
2881e90fe7b522f60f3007975a0fbf46ff742c10720202bfeff31211b2dc5b99
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Convenience functions for `astropy.cosmology`.
"""

import warnings
import numpy as np

from .core import CosmologyError
from ..units import Quantity

__all__ = ['z_at_value']

__doctest_requires__ = {'*': ['scipy.integrate']}


def z_at_value(func, fval, zmin=1e-8, zmax=1000, ztol=1e-8, maxfun=500):
    """ Find the redshift ``z`` at which ``func(z) = fval``.

    This finds the redshift at which one of the cosmology functions or
    methods (for example Planck13.distmod) is equal to a known value.

    .. warning::
      Make sure you understand the behaviour of the function that you
      are trying to invert! Depending on the cosmology, there may not
      be a unique solution. For example, in the standard Lambda CDM
      cosmology, there are two redshifts which give an angular
      diameter distance of 1500 Mpc, z ~ 0.7 and z ~ 3.8. To force
      ``z_at_value`` to find the solution you are interested in, use the
      ``zmin`` and ``zmax`` keywords to limit the search range (see the
      example below).

    Parameters
    ----------
    func : function or method
       A function that takes a redshift as input.

    fval : astropy.Quantity instance
       The value of ``func(z)``.

    zmin : float, optional
       The lower search limit for ``z``.  Beware of divergences
       in some cosmological functions, such as distance moduli,
       at z=0 (default 1e-8).

    zmax : float, optional
       The upper search limit for ``z`` (default 1000).

    ztol : float, optional
       The relative error in ``z`` acceptable for convergence.

    maxfun : int, optional
       The maximum number of function evaluations allowed in the
       optimization routine (default 500).

    Returns
    -------
    z : float
      The redshift ``z`` satisfying ``zmin < z < zmax`` and
      ``func(z) = fval`` within ``ztol``.

    Notes
    -----
    This works for any arbitrary input cosmology, but is inefficient if you
    want to invert a large number of values for the same cosmology. In this
    case, it is faster to instead generate an array of values at many
    closely-spaced redshifts that cover the relevant redshift range, and then
    use interpolation to find the redshift at each value you're interested
    in. For example, to efficiently find the redshifts corresponding to 10^6
    values of the distance modulus in a Planck13 cosmology, you could do the
    following:

    >>> import numpy as np
    >>> import astropy.units as u
    >>> from astropy.cosmology import Planck13, z_at_value

    Generate 10^6 distance moduli between 24 and 43 for which we
    want to find the corresponding redshifts:

    >>> Dvals = (24 + np.random.rand(int(1e6)) * 20) * u.mag

    Make a grid of distance moduli covering the redshift range we
    need using 50 equally log-spaced values between zmin and
    zmax. We use log spacing to adequately sample the steep part of
    the curve at low distance moduli:

    >>> zmin = z_at_value(Planck13.distmod, Dvals.min())
    >>> zmax = z_at_value(Planck13.distmod, Dvals.max())
    >>> zgrid = np.logspace(np.log10(zmin), np.log10(zmax), 50)
    >>> Dgrid = Planck13.distmod(zgrid)

    Finally interpolate to find the redshift at each distance modulus
    (the distance-modulus grid is the second argument, since ``np.interp``
    expects the x-coordinates of the data points there):

    >>> zvals = np.interp(Dvals.value, Dgrid.value, zgrid)

    Examples
    --------
    >>> import astropy.units as u
    >>> from astropy.cosmology import Planck13, z_at_value

    The age and lookback time are monotonic with redshift, and so a
    unique solution can be found:

    >>> z_at_value(Planck13.age, 2 * u.Gyr)
    3.19812268...

    The angular diameter is not monotonic however, and there are two
    redshifts that give a value of 1500 Mpc. Use the zmin and zmax keywords
    to find the one you're interested in:

    >>> z_at_value(Planck13.angular_diameter_distance, 1500 * u.Mpc, zmax=1.5)
    0.6812769577...
    >>> z_at_value(Planck13.angular_diameter_distance, 1500 * u.Mpc, zmin=2.5)
    3.7914913242...

    Also note that the luminosity distance and distance modulus (two
    other commonly inverted quantities) are monotonic in flat and open
    universes, but not in closed universes.
    """
    from scipy.optimize import fminbound

    fval_zmin = func(zmin)
    fval_zmax = func(zmax)
    if np.sign(fval - fval_zmin) != np.sign(fval_zmax - fval):
        warnings.warn("""\
fval is not bracketed by func(zmin) and func(zmax). This means either
there is no solution, or that there is more than one solution between
zmin and zmax satisfying fval = func(z).""")

    if isinstance(fval_zmin, Quantity):
        val = fval.to_value(fval_zmin.unit)
        f = lambda z: abs(func(z).value - val)
    else:
        f = lambda z: abs(func(z) - fval)

    zbest, resval, ierr, ncall = fminbound(f, zmin, zmax, maxfun=maxfun,
                                           full_output=1, xtol=ztol)
    if ierr != 0:
        warnings.warn('Maximum number of function calls ({}) reached'.format(
            ncall))

    if np.allclose(zbest, zmax):
        raise CosmologyError("Best guess z is very close to the upper z "
                             "limit.\nTry re-running with a different zmax.")
    elif np.allclose(zbest, zmin):
        raise CosmologyError("Best guess z is very close to the lower z "
                             "limit.\nTry re-running with a different zmin.")

    return zbest
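

# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition): the interpolation recipe from the
# ``z_at_value`` docstring written out end-to-end.  Assumes scipy is
# installed and uses the built-in Planck13 cosmology.
if __name__ == '__main__':
    from astropy import units as u
    from astropy.cosmology import Planck13

    # Distance moduli for which we want redshifts.
    dvals = (24 + np.random.rand(1000) * 20) * u.mag

    # Bracket the required redshift range, then build a log-spaced grid.
    z_lo = z_at_value(Planck13.distmod, dvals.min())
    z_hi = z_at_value(Planck13.distmod, dvals.max())
    zgrid = np.logspace(np.log10(z_lo), np.log10(z_hi), 50)
    dgrid = Planck13.distmod(zgrid)

    # distmod increases monotonically with z, so interpolating z as a
    # function of distance modulus is well defined.
    zvals = np.interp(dvals.value, dgrid.value, zgrid)
    print(zvals[:5])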
181465fee7877fbffbeae8e71ec6078dc21e51d077ed750d17e24947a0374d14
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest import numpy as np from numpy.testing import assert_equal from numpy.testing.utils import assert_allclose try: import scipy # pylint: disable=W0611 except ImportError: HAS_SCIPY = False else: HAS_SCIPY = True from ..jackknife import jackknife_resampling, jackknife_stats def test_jackknife_resampling(): data = np.array([1, 2, 3, 4]) answer = np.array([[2, 3, 4], [1, 3, 4], [1, 2, 4], [1, 2, 3]]) assert_equal(answer, jackknife_resampling(data)) # test jackknife stats, except confidence interval @pytest.mark.skipif('not HAS_SCIPY') def test_jackknife_stats(): # Test from the third example of Ref.[3] data = np.array((115, 170, 142, 138, 280, 470, 480, 141, 390)) # true estimate, bias, and std_err answer = (258.4444, 0.0, 50.25936) assert_allclose(answer, jackknife_stats(data, np.mean)[0:3], atol=1e-4) # test jackknife stats, including confidence intervals @pytest.mark.skipif('not HAS_SCIPY') def test_jackknife_stats_conf_interval(): # Test from the first example of Ref.[3] data = np.array([48, 42, 36, 33, 20, 16, 29, 39, 42, 38, 42, 36, 20, 15, 42, 33, 22, 20, 41, 43, 45, 34, 14, 22, 6, 7, 0, 15, 33, 34, 28, 29, 34, 41, 4, 13, 32, 38, 24, 25, 47, 27, 41, 41, 24, 28, 26, 14, 30, 28, 41, 40]) data = np.reshape(data, (-1, 2)) data = data[:, 1] # true estimate, bias, and std_err answer = (113.7862, -4.376391, 22.26572) # calculate the mle of the variance (biased estimator!) def mle_var(x): return np.sum((x - np.mean(x))*(x - np.mean(x)))/len(x) assert_allclose(answer, jackknife_stats(data, mle_var, 0.95)[0:3], atol=1e-4) # test confidence interval answer = np.array((70.14615, 157.42616)) assert_allclose(answer, jackknife_stats(data, mle_var, 0.95)[3], atol=1e-4)
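

# A small extra sanity check added editorially (a sketch): for the sample
# mean the jackknife bias is identically zero and the jackknife standard
# error reduces to the textbook value std(data, ddof=1)/sqrt(n), so the
# returned bias-corrected estimate must equal the plain mean.
@pytest.mark.skipif('not HAS_SCIPY')
def test_jackknife_stats_mean_sanity():
    data = np.arange(10.)
    estimate, bias, std_err, conf_interval = jackknife_stats(data, np.mean)
    assert_allclose(estimate, np.mean(data))
    assert_allclose(bias, 0., atol=1e-12)
    assert_allclose(std_err, np.std(data, ddof=1) / np.sqrt(data.size))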
d13b4b97493ca818854d90491f3bfaabcd8ea9ceb5597e196bbb665632110868
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest import numpy as np from numpy.testing import assert_allclose from .. import histogram, scott_bin_width, freedman_bin_width, knuth_bin_width try: import scipy # pylint: disable=W0611 except ImportError: HAS_SCIPY = False else: HAS_SCIPY = True def test_scott_bin_width(N=10000, rseed=0): rng = np.random.RandomState(rseed) X = rng.randn(N) delta = scott_bin_width(X) assert_allclose(delta, 3.5 * np.std(X) / N ** (1 / 3)) delta, bins = scott_bin_width(X, return_bins=True) assert_allclose(delta, 3.5 * np.std(X) / N ** (1 / 3)) with pytest.raises(ValueError): scott_bin_width(rng.rand(2, 10)) def test_freedman_bin_width(N=10000, rseed=0): rng = np.random.RandomState(rseed) X = rng.randn(N) v25, v75 = np.percentile(X, [25, 75]) delta = freedman_bin_width(X) assert_allclose(delta, 2 * (v75 - v25) / N ** (1 / 3)) delta, bins = freedman_bin_width(X, return_bins=True) assert_allclose(delta, 2 * (v75 - v25) / N ** (1 / 3)) with pytest.raises(ValueError): freedman_bin_width(rng.rand(2, 10)) @pytest.mark.skipif('not HAS_SCIPY') def test_knuth_bin_width(N=10000, rseed=0): rng = np.random.RandomState(rseed) X = rng.randn(N) dx, bins = knuth_bin_width(X, return_bins=True) assert_allclose(len(bins), 59) dx2 = knuth_bin_width(X) assert dx == dx2 with pytest.raises(ValueError): knuth_bin_width(rng.rand(2, 10)) @pytest.mark.skipif('not HAS_SCIPY') def test_knuth_histogram(N=1000, rseed=0): rng = np.random.RandomState(rseed) x = rng.randn(N) counts, bins = histogram(x, 'knuth') assert (counts.sum() == len(x)) assert (len(counts) == len(bins) - 1) def test_histogram(N=1000, rseed=0): rng = np.random.RandomState(rseed) x = rng.randn(N) for bins in [30, np.linspace(-5, 5, 31), 'scott', 'freedman', 'blocks']: counts, bins = histogram(x, bins) assert (counts.sum() == len(x)) assert (len(counts) == len(bins) - 1) def test_histogram_range(N=1000, rseed=0): rng = np.random.RandomState(rseed) x = rng.randn(N) range = (0.1, 0.8) for bins in ['scott', 'freedman', 'blocks']: counts, bins = histogram(x, bins, range=range) @pytest.mark.skipif('not HAS_SCIPY') def test_histogram_output_knuth(): rng = np.random.RandomState(0) X = rng.randn(100) counts, bins = histogram(X, bins='knuth') assert_allclose(counts, [1, 6, 9, 14, 21, 22, 12, 8, 7]) assert_allclose(bins, [-2.55298982, -2.01712932, -1.48126883, -0.94540834, -0.40954784, 0.12631265, 0.66217314, 1.19803364, 1.73389413, 2.26975462]) def test_histogram_output(): rng = np.random.RandomState(0) X = rng.randn(100) counts, bins = histogram(X, bins=10) assert_allclose(counts, [1, 5, 7, 13, 17, 18, 16, 11, 7, 5]) assert_allclose(bins, [-2.55298982, -2.07071537, -1.58844093, -1.10616648, -0.62389204, -0.1416176, 0.34065685, 0.82293129, 1.30520574, 1.78748018, 2.26975462]) counts, bins = histogram(X, bins='scott') assert_allclose(counts, [2, 13, 23, 34, 16, 10, 2]) assert_allclose(bins, [-2.55298982, -1.79299405, -1.03299829, -0.27300252, 0.48699324, 1.24698901, 2.00698477, 2.76698054]) counts, bins = histogram(X, bins='freedman') assert_allclose(counts, [2, 7, 13, 20, 26, 14, 11, 5, 2]) assert_allclose(bins, [-2.55298982, -1.95796338, -1.36293694, -0.7679105, -0.17288406, 0.42214237, 1.01716881, 1.61219525, 2.20722169, 2.80224813]) counts, bins = histogram(X, bins='blocks') assert_allclose(counts, [10, 61, 29]) assert_allclose(bins, [-2.55298982, -1.24381059, 0.46422235, 2.26975462]) def test_histogram_badargs(N=1000, rseed=0): rng = np.random.RandomState(rseed) x = rng.randn(N) # weights is not supported 
for bins in ['scott', 'freedman', 'blocks']: with pytest.raises(NotImplementedError): histogram(x, bins, weights=x) # bad bins arg gives ValueError with pytest.raises(ValueError): histogram(x, bins='bad_argument')
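

# Editorial sanity-check sketch: this assumes (as the scalar and array cases
# in test_histogram suggest) that a plain integer ``bins`` argument is handed
# straight through to ``np.histogram``, so the two must agree exactly.
def test_histogram_matches_numpy(N=1000, rseed=0):
    rng = np.random.RandomState(rseed)
    x = rng.randn(N)
    counts, bins = histogram(x, bins=20)
    counts_np, bins_np = np.histogram(x, bins=20)
    assert_allclose(counts, counts_np)
    assert_allclose(bins, bins_np)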
f6277e7c5dd8d266a3dd3c9741a0ef97964378c117e31f6f812363be76da3058
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest import numpy as np from numpy.random import randn from numpy.testing import assert_equal, assert_allclose try: from scipy import stats # used in testing except ImportError: HAS_SCIPY = False else: HAS_SCIPY = True from ..sigma_clipping import sigma_clip, SigmaClip, sigma_clipped_stats from ...utils.misc import NumpyRNGContext def test_sigma_clip(): # need to seed the numpy RNG to make sure we don't get some # amazingly flukey random number that breaks one of the tests with NumpyRNGContext(12345): # Amazing, I've got the same combination on my luggage! randvar = randn(10000) filtered_data = sigma_clip(randvar, sigma=1, iters=2) assert sum(filtered_data.mask) > 0 assert sum(~filtered_data.mask) < randvar.size # this is actually a silly thing to do, because it uses the # standard deviation as the variance, but it tests to make sure # these arguments are actually doing something filtered_data2 = sigma_clip(randvar, sigma=1, iters=2, stdfunc=np.var) assert not np.all(filtered_data.mask == filtered_data2.mask) filtered_data3 = sigma_clip(randvar, sigma=1, iters=2, cenfunc=np.mean) assert not np.all(filtered_data.mask == filtered_data3.mask) # make sure the iters=None method works at all. filtered_data = sigma_clip(randvar, sigma=3, iters=None) # test copying assert filtered_data.data[0] == randvar[0] filtered_data.data[0] += 1. assert filtered_data.data[0] != randvar[0] filtered_data = sigma_clip(randvar, sigma=3, iters=None, copy=False) assert filtered_data.data[0] == randvar[0] filtered_data.data[0] += 1. assert filtered_data.data[0] == randvar[0] # test axis data = np.arange(5) + np.random.normal(0., 0.05, (5, 5)) + \ np.diag(np.ones(5)) filtered_data = sigma_clip(data, axis=0, sigma=2.3) assert filtered_data.count() == 20 filtered_data = sigma_clip(data, axis=1, sigma=2.3) assert filtered_data.count() == 25 @pytest.mark.skipif('not HAS_SCIPY') def test_compare_to_scipy_sigmaclip(): # need to seed the numpy RNG to make sure we don't get some # amazingly flukey random number that breaks one of the tests with NumpyRNGContext(12345): randvar = randn(10000) astropyres = sigma_clip(randvar, sigma=3, iters=None, cenfunc=np.mean) scipyres = stats.sigmaclip(randvar, 3, 3)[0] assert astropyres.count() == len(scipyres) assert_equal(astropyres[~astropyres.mask].data, scipyres) def test_sigma_clip_scalar_mask(): """Test that the returned mask is not a scalar.""" data = np.arange(5) result = sigma_clip(data, sigma=100., iters=1) assert result.mask.shape != () def test_sigma_clip_class(): with NumpyRNGContext(12345): data = randn(100) data[10] = 1.e5 sobj = SigmaClip(sigma=1, iters=2) sfunc = sigma_clip(data, sigma=1, iters=2) assert_equal(sobj(data), sfunc) def test_sigma_clipped_stats(): """Test list data with input mask or mask_value (#3268).""" # test list data with mask data = [0, 1] mask = np.array([True, False]) result = sigma_clipped_stats(data, mask=mask) # Check that the result of np.ma.median was converted to a scalar assert isinstance(result[1], float) assert result == (1., 1., 0.) # test list data with mask_value result = sigma_clipped_stats(data, mask_value=0.) assert isinstance(result[1], float) assert result == (1., 1., 0.) # test without mask data = [0, 2] result = sigma_clipped_stats(data) assert isinstance(result[1], float) assert result == (1., 1., 1.) 
    _data = np.arange(10)
    data = np.ma.MaskedArray([_data, _data, 10 * _data])
    mean = sigma_clip(data, axis=0, sigma=1).mean(axis=0)
    assert_equal(mean, _data)
    mean, median, stddev = sigma_clipped_stats(data, axis=0, sigma=1)
    assert_equal(mean, _data)
    assert_equal(median, _data)
    assert_equal(stddev, np.zeros_like(_data))


def test_sigma_clipped_stats_ddof():
    with NumpyRNGContext(12345):
        data = randn(10000)
        data[10] = 1.e5
        mean1, median1, stddev1 = sigma_clipped_stats(data)
        mean2, median2, stddev2 = sigma_clipped_stats(data, std_ddof=1)
        assert mean1 == mean2
        assert median1 == median2
        assert_allclose(stddev1, 0.98156805711673156)
        assert_allclose(stddev2, 0.98161731654802831)


def test_invalid_sigma_clip():
    """Test sigma_clip of data containing invalid values."""

    data = np.ones((5, 5))
    data[2, 2] = 1000
    data[3, 4] = np.nan
    data[1, 1] = np.inf

    result = sigma_clip(data)

    # Pre #4051 if data contains any NaN or infs sigma_clip returns the
    # mask containing `False` only or TypeError if data also contains a
    # masked value.
    assert result.mask[2, 2]
    assert result.mask[3, 4]
    assert result.mask[1, 1]


def test_sigmaclip_negative_axis():
    """Test that dimensions are expanded correctly even if axis is
    negative."""
    data = np.ones((3, 4))
    # without correct expand_dims this would raise a ValueError
    sigma_clip(data, axis=-1)


def test_sigmaclip_fully_masked():
    """Make sure a fully masked array is returned when sigma clipping a
    fully masked array.
    """
    data = np.ma.MaskedArray(data=[[1., 0.], [0., 1.]],
                             mask=[[True, True], [True, True]])
    clipped_data = sigma_clip(data)
    assert np.ma.allequal(data, clipped_data)


def test_sigmaclip_empty_masked():
    """Make sure an empty masked array is returned when sigma clipping an
    empty masked array.
    """
    data = np.ma.MaskedArray(data=[], mask=[])
    clipped_data = sigma_clip(data)
    assert np.ma.allequal(data, clipped_data)


def test_sigmaclip_empty():
    """Make sure an empty array is returned when sigma clipping an empty
    array.
    """
    data = np.array([])
    clipped_data = sigma_clip(data)
    assert_equal(data, clipped_data)
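

# Editorial sanity-check sketch: a single gross outlier injected into
# otherwise well-behaved Gaussian data must end up masked, while the bulk
# of the points should survive a 3-sigma clip.
def test_sigmaclip_masks_gross_outlier():
    with NumpyRNGContext(12345):
        data = randn(100)
    data[10] = 1.e5
    clipped = sigma_clip(data, sigma=3, iters=None)
    assert clipped.mask[10]
    assert clipped.count() >= 90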
a00037ada68ee1767301a48202cb121893bf0439d66b14c76a43e8c5a5c5b791
from numpy.testing.utils import assert_allclose from ..info_theory import bayesian_info_criterion, bayesian_info_criterion_lsq from ..info_theory import akaike_info_criterion, akaike_info_criterion_lsq def test_bayesian_info_criterion(): # This test is from an example presented in Ref [1] lnL = (-176.4, -173.0) n_params = (2, 3) n_samples = 100 answer = 2.195 bic_g = bayesian_info_criterion(lnL[0], n_params[0], n_samples) bic_t = bayesian_info_criterion(lnL[1], n_params[1], n_samples) assert_allclose(answer, bic_g - bic_t, atol=1e-1) def test_akaike_info_criterion(): # This test is from an example presented in Ref [2] n_samples = 121 lnL = (-3.54, -4.17) n_params = (6, 5) answer = 0.95 aic_1 = akaike_info_criterion(lnL[0], n_params[0], n_samples) aic_2 = akaike_info_criterion(lnL[1], n_params[1], n_samples) assert_allclose(answer, aic_1 - aic_2, atol=1e-2) def test_akaike_info_criterion_lsq(): # This test is from an example presented in Ref [1] n_samples = 100 n_params = (4, 3, 3) ssr = (25.0, 26.0, 27.0) answer = (-130.21, -128.46, -124.68) assert_allclose(answer[0], akaike_info_criterion_lsq(ssr[0], n_params[0], n_samples), atol=1e-2) assert_allclose(answer[1], akaike_info_criterion_lsq(ssr[1], n_params[1], n_samples), atol=1e-2) assert_allclose(answer[2], akaike_info_criterion_lsq(ssr[2], n_params[2], n_samples), atol=1e-2) def test_bayesian_info_criterion_lsq(): """This test is from: http://www.statoek.wiso.uni-goettingen.de/veranstaltungen/non_semi_models/ AkaikeLsg.pdf Note that in there, they compute a "normalized BIC". Therefore, the answers presented here are recalculated versions based on their values. """ n_samples = 25 n_params = (1, 2, 1) ssr = (48959, 32512, 37980) answer = (192.706, 185.706, 186.360) assert_allclose(answer[0], bayesian_info_criterion_lsq(ssr[0], n_params[0], n_samples), atol=1e-2) assert_allclose(answer[1], bayesian_info_criterion_lsq(ssr[1], n_params[1], n_samples), atol=1e-2) assert_allclose(answer[2], bayesian_info_criterion_lsq(ssr[2], n_params[2], n_samples), atol=1e-2)
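

# Editorial sanity-check sketch: the differences tested above follow from
# the textbook definition BIC = k ln(n) - 2 ln(L); this checks that
# definition directly (assuming it is indeed the one implemented).
def test_bayesian_info_criterion_definition():
    import numpy as np
    lnL, n_params, n_samples = -176.4, 2, 100
    answer = n_params * np.log(n_samples) - 2.0 * lnL
    assert_allclose(answer, bayesian_info_criterion(lnL, n_params, n_samples))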
0ed6487d0d11474c394151239db96229df0774ae2a18cef11fb126d413fe228e
import pytest
import numpy as np
from numpy.testing import assert_equal
from numpy.testing.utils import assert_allclose

from astropy import units as u

try:
    import scipy.stats
except ImportError:
    HAS_SCIPY = False
else:
    HAS_SCIPY = True

from ..circstats import _length, circmean, circvar, circmoment, circcorrcoef
from ..circstats import rayleightest, vtest, vonmisesmle


def test__length():
    # testing against R CircStats package
    # Ref. [1] pages 6 and 125
    weights = np.array([12, 1, 6, 1, 2, 1, 1])
    answer = 0.766282
    data = np.array([0, 3.6, 36, 72, 108, 169.2, 324])*u.deg
    assert_allclose(answer, _length(data, weights=weights), atol=1e-4)


def test_circmean():
    # testing against R CircStats package
    # Ref[1], page 23
    data = np.array([51, 67, 40, 109, 31, 358])*u.deg
    answer = 48.63*u.deg
    assert_equal(answer, np.around(circmean(data), 2))


@pytest.mark.skipif('not HAS_SCIPY')
def test_circmean_against_scipy():
    # testing against scipy.stats.circmean function
    # the data is the same as the test before, but in radians
    data = np.array([0.89011792, 1.1693706, 0.6981317, 1.90240888,
                     0.54105207, 6.24827872])
    answer = scipy.stats.circmean(data)
    assert_equal(np.around(answer, 2), np.around(circmean(data), 2))


def test_circvar():
    # testing against R CircStats package
    # Ref[1], page 23
    data = np.array([51, 67, 40, 109, 31, 358])*u.deg
    answer = 0.1635635
    assert_allclose(answer, circvar(data), atol=1e-4)


def test_circmoment():
    # testing against R CircStats package
    # Ref[1], page 23
    data = np.array([51, 67, 40, 109, 31, 358])*u.deg
    # 2nd, 3rd, and 4th moments
    # this is the answer given in Ref[1] in radians
    answer = np.array([1.588121, 1.963919, 2.685556])
    answer = np.around(np.rad2deg(answer)*u.deg, 4)

    result = (np.around(circmoment(data, p=2)[0], 4),
              np.around(circmoment(data, p=3)[0], 4),
              np.around(circmoment(data, p=4)[0], 4))

    assert_equal(answer[0], result[0])
    assert_equal(answer[1], result[1])
    assert_equal(answer[2], result[2])

    # testing lengths
    answer = np.array([0.4800428, 0.236541, 0.2255761])
    assert_allclose(answer, (circmoment(data, p=2)[1],
                             circmoment(data, p=3)[1],
                             circmoment(data, p=4)[1]), atol=1e-4)


def test_circcorrcoef():
    # testing against R CircStats package
    # Ref[1], page 180
    alpha = np.array([356, 97, 211, 232, 343, 292, 157, 302, 335, 302, 324,
                      85, 324, 340, 157, 238, 254, 146, 232, 122, 329])*u.deg
    beta = np.array([119, 162, 221, 259, 270, 29, 97, 292, 40, 313, 94, 45,
                     47, 108, 221, 270, 119, 248, 270, 45, 23])*u.deg
    answer = 0.2704648
    assert_allclose(answer, circcorrcoef(alpha, beta), atol=1e-4)


def test_rayleightest():
    # testing against R CircStats package
    data = np.array([190.18, 175.48, 155.95, 217.83, 156.36])*u.deg
    # answer was obtained through R CircStats function r.test(x)
    answer = (0.00640418, 0.9202565)
    result = (rayleightest(data), _length(data))
    assert_allclose(answer[0], result[0], atol=1e-4)
    assert_allclose(answer[1], result[1], atol=1e-4)


@pytest.mark.skipif('not HAS_SCIPY')
def test_vtest():
    # testing against R CircStats package
    data = np.array([190.18, 175.48, 155.95, 217.83, 156.36])*u.deg
    # answer was obtained through R CircStats function v0.test(x)
    answer = 0.9994725
    assert_allclose(answer, vtest(data), atol=1e-5)


def test_vonmisesmle():
    # testing against R CircStats package
    # testing non-Quantity
    data = np.array([3.3699057, 4.0411630, 0.5014477, 2.6223103, 3.7336524,
                     1.8136389, 4.1566039, 2.7806317, 2.4672173, 2.8493644])
    # answer was obtained through R CircStats function vm.ml(x)
    answer = (3.006514, 1.474132)
    assert_allclose(answer[0], vonmisesmle(data)[0], atol=1e-5)
    assert_allclose(answer[1], vonmisesmle(data)[1], atol=1e-5)

    # testing with Quantity
    data = np.rad2deg(data)*u.deg
    answer = np.rad2deg(3.006514)*u.deg
    assert_equal(np.around(answer, 3), np.around(vonmisesmle(data)[0], 3))
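
def _circmean_sketch():
    # Illustrative sketch, not part of the original test module: the circular
    # mean is the argument of the mean resultant vector.  Assuming that
    # standard definition, this reproduces the 48.63 deg expected by
    # test_circmean above.
    import numpy as np

    angles = np.deg2rad([51, 67, 40, 109, 31, 358])
    mean_dir = np.arctan2(np.mean(np.sin(angles)), np.mean(np.cos(angles)))
    return np.rad2deg(mean_dir)  # ~48.63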
9efac287544f8ce4ab27db275ab81e37c59530dc59da9fe6c4577e7663a3ecb4
import numpy as np
import pytest
from numpy.testing.utils import assert_allclose

from ..spatial import RipleysKEstimator
from ...utils.misc import NumpyRNGContext

a = np.array([[1, 4], [2, 5], [3, 6]])
b = np.array([[-1, 1], [-2, 2], [-3, 3]])


@pytest.mark.parametrize("points, x_min, x_max", [(a, 0, 10), (b, -5, 5)])
def test_ripley_K_implementation(points, x_min, x_max):
    """
    Test against Ripley's K function implemented in R package `spatstat`.

    (The original docstring contains two ASCII scatter plots of the
    three-point configurations ``a`` and ``b`` defined above: the points lie
    on the diagonals of [1, 3] x [4, 6] and [-3, -1] x [1, 3], respectively.)
    """
    area = 100
    r = np.linspace(0, 2.5, 5)

    Kest = RipleysKEstimator(area=area, x_min=x_min, y_min=x_min,
                             x_max=x_max, y_max=x_max)

    ANS_NONE = np.array([0, 0, 0, 66.667, 66.667])
    assert_allclose(ANS_NONE, Kest(data=points, radii=r, mode='none'),
                    atol=1e-3)

    ANS_TRANS = np.array([0, 0, 0, 82.304, 82.304])
    assert_allclose(ANS_TRANS, Kest(data=points, radii=r, mode='translation'),
                    atol=1e-3)


with NumpyRNGContext(123):
    a = np.random.uniform(low=5, high=10, size=(100, 2))
    b = np.random.uniform(low=-5, high=-10, size=(100, 2))


@pytest.mark.parametrize("points", [a, b])
def test_ripley_uniform_property(points):
    # Ripley's K function without edge-correction converges to the area when
    # the number of points and the argument radii are large enough, i.e.,
    # K(x) --> area as x --> inf
    area = 50
    Kest = RipleysKEstimator(area=area)
    r = np.linspace(0, 20, 5)
    assert_allclose(area, Kest(data=points, radii=r, mode='none')[4])


with NumpyRNGContext(123):
    a = np.random.uniform(low=0, high=1, size=(500, 2))
    b = np.random.uniform(low=-1, high=0, size=(500, 2))


@pytest.mark.parametrize("points, low, high", [(a, 0, 1), (b, -1, 0)])
def test_ripley_large_density(points, low, high):
    Kest = RipleysKEstimator(area=1, x_min=low, x_max=high, y_min=low,
                             y_max=high)
    r = np.linspace(0, 0.25, 25)
    Kpos = Kest.poisson(r)
    modes = ['ohser', 'translation', 'ripley']
    for m in modes:
        Kest_r = Kest(data=points, radii=r, mode=m)
        assert_allclose(Kpos, Kest_r, atol=1e-1)


with NumpyRNGContext(123):
    a = np.random.uniform(low=5, high=10, size=(500, 2))
    b = np.random.uniform(low=-10, high=-5, size=(500, 2))


@pytest.mark.parametrize("points, low, high", [(a, 5, 10), (b, -10, -5)])
def test_ripley_modes(points, low, high):
    Kest = RipleysKEstimator(area=25, x_max=high, y_max=high, x_min=low,
                             y_min=low)
    r = np.linspace(0, 1.2, 25)
    Kpos_mean = np.mean(Kest.poisson(r))
    modes = ['ohser', 'translation', 'ripley']
    for m in modes:
        Kest_mean = np.mean(Kest(data=points, radii=r, mode=m))
        assert_allclose(Kpos_mean, Kest_mean, atol=1e-1, rtol=1e-1)


with NumpyRNGContext(123):
    a = np.random.uniform(low=0, high=1, size=(50, 2))
    b = np.random.uniform(low=-1, high=0, size=(50, 2))


@pytest.mark.parametrize("points, low, high", [(a, 0, 1), (b, -1, 0)])
def test_ripley_large_density_var_width(points, low, high):
    Kest = RipleysKEstimator(area=1, x_min=low, x_max=high, y_min=low,
                             y_max=high)
    r = np.linspace(0, 0.25, 25)
    Kpos = Kest.poisson(r)
    Kest_r = Kest(data=points, radii=r, mode='var-width')
    assert_allclose(Kpos, Kest_r, atol=1e-1)


with NumpyRNGContext(123):
    a = np.random.uniform(low=5, high=10, size=(50, 2))
    b = np.random.uniform(low=-10, high=-5, size=(50, 2))


@pytest.mark.parametrize("points, low, high", [(a, 5, 10), (b, -10, -5)])
def test_ripley_var_width(points, low, high):
    Kest = RipleysKEstimator(area=25, x_max=high, y_max=high, x_min=low,
                             y_min=low)
    r = np.linspace(0, 1.2, 25)
    Kest_ohser = np.mean(Kest(data=points, radii=r, mode='ohser'))
    Kest_var_width = np.mean(Kest(data=points, radii=r, mode='var-width'))
    assert_allclose(Kest_ohser, Kest_var_width, atol=1e-1, rtol=1e-1)
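
def _csr_reference_sketch():
    # Illustrative sketch, not part of the original test module: under
    # complete spatial randomness the theoretical Ripley K function is
    # K(r) = pi * r**2, which is the baseline the tests above compare the
    # estimators against via ``Kest.poisson(r)`` (assumed here to implement
    # this closed form).
    import numpy as np

    r = np.linspace(0, 0.25, 25)
    return np.pi * r ** 2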
1c832163ee1f202eec541f019c6890613b6b33ddd997f4bc40375a3f3187bd3d
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import pytest
import numpy as np
from numpy.testing import assert_allclose

from .. import bayesian_blocks, RegularEvents


def test_single_change_point(rseed=0):
    rng = np.random.RandomState(rseed)
    x = np.concatenate([rng.rand(100), 1 + rng.rand(200)])

    bins = bayesian_blocks(x)

    assert (len(bins) == 3)
    assert_allclose(bins[1], 1, rtol=0.02)


def test_duplicate_events(rseed=0):
    rng = np.random.RandomState(rseed)
    t = rng.rand(100)
    t[80:] = t[:20]

    x = np.ones_like(t)
    x[:20] += 1

    bins1 = bayesian_blocks(t)
    bins2 = bayesian_blocks(t[:80], x[:80])

    assert_allclose(bins1, bins2)


def test_measures_fitness_homoscedastic(rseed=0):
    rng = np.random.RandomState(rseed)
    t = np.linspace(0, 1, 11)
    x = np.exp(-0.5 * (t - 0.5) ** 2 / 0.01 ** 2)
    sigma = 0.05
    x = x + sigma * rng.randn(len(x))

    bins = bayesian_blocks(t, x, sigma, fitness='measures')

    assert_allclose(bins, [0, 0.45, 0.55, 1])


def test_measures_fitness_heteroscedastic():
    rng = np.random.RandomState(1)
    t = np.linspace(0, 1, 11)
    x = np.exp(-0.5 * (t - 0.5) ** 2 / 0.01 ** 2)
    sigma = 0.02 + 0.02 * rng.rand(len(x))
    x = x + sigma * rng.randn(len(x))

    bins = bayesian_blocks(t, x, sigma, fitness='measures')

    assert_allclose(bins, [0, 0.45, 0.55, 1])


def test_regular_events():
    rng = np.random.RandomState(0)
    dt = 0.01
    steps = np.concatenate([np.unique(rng.randint(0, 500, 100)),
                            np.unique(rng.randint(500, 1000, 200))])
    t = dt * steps

    # string fitness
    bins1 = bayesian_blocks(t, fitness='regular_events', dt=dt)
    assert (len(bins1) == 3)
    assert_allclose(bins1[1], 5, rtol=0.05)

    # class name fitness
    bins2 = bayesian_blocks(t, fitness=RegularEvents, dt=dt)
    assert_allclose(bins1, bins2)

    # class instance fitness
    bins3 = bayesian_blocks(t, fitness=RegularEvents(dt=dt))
    assert_allclose(bins1, bins3)


def test_errors():
    rng = np.random.RandomState(0)
    t = rng.rand(100)

    # x must be integer or None for events
    with pytest.raises(ValueError):
        bayesian_blocks(t, fitness='events', x=t)

    # x must be binary for regular events
    with pytest.raises(ValueError):
        bayesian_blocks(t, fitness='regular_events', x=10 * t, dt=1)

    # x must be specified for measures
    with pytest.raises(ValueError):
        bayesian_blocks(t, fitness='measures')

    # sigma cannot be specified without x
    with pytest.raises(ValueError):
        bayesian_blocks(t, fitness='events', sigma=0.5)

    # length of x must match length of t
    with pytest.raises(ValueError):
        bayesian_blocks(t, fitness='measures', x=t[:-1])

    # repeated values in t fail when x is specified
    t2 = t.copy()
    t2[1] = t2[0]
    with pytest.raises(ValueError):
        bayesian_blocks(t2, fitness='measures', x=t)

    # sigma must be broadcastable with x
    with pytest.raises(ValueError):
        bayesian_blocks(t, fitness='measures', x=t, sigma=t[:-1])


def test_fitness_function_results():
    """Test results for several fitness functions"""
    rng = np.random.RandomState(42)

    # Event Data
    t = rng.randn(100)
    edges = bayesian_blocks(t, fitness='events')
    assert_allclose(edges, [-2.6197451, -0.71094865, 0.36866702, 1.85227818])

    # Event data with repeats
    t[80:] = t[:20]
    edges = bayesian_blocks(t, fitness='events', p0=0.01)
    assert_allclose(edges, [-2.6197451, -0.47432431, -0.46202823, 1.85227818])

    # Regular event data
    dt = 0.01
    t = dt * np.arange(1000)
    x = np.zeros(len(t))
    N = len(t) // 10
    x[rng.randint(0, len(t), N)] = 1
    x[rng.randint(0, len(t) // 2, N)] = 1
    edges = bayesian_blocks(t, x, fitness='regular_events', dt=dt)
    assert_allclose(edges, [0, 5.105, 9.99])

    # Measured point data with errors
    t = 100 * rng.rand(20)
    x = np.exp(-0.5 * (t - 50) ** 2)
    sigma = 0.1
    x_obs = x + sigma * rng.randn(len(x))
    edges = bayesian_blocks(t, x_obs, sigma, fitness='measures')
    assert_allclose(edges, [4.360377, 48.456895, 52.597917, 99.455051])
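
def _bayesian_blocks_sketch():
    # Illustrative sketch, not part of the original test module: the edges
    # returned by bayesian_blocks form an adaptive-width binning that can be
    # passed straight to np.histogram (assumes the public ``astropy.stats``
    # import path).
    import numpy as np
    from astropy.stats import bayesian_blocks

    rng = np.random.RandomState(0)
    t = np.concatenate([rng.rand(100), 1 + rng.rand(200)])
    edges = bayesian_blocks(t, fitness='events')
    counts, _ = np.histogram(t, bins=edges)  # adaptive-width histogram
    return edges, counts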
c2b9a942a046b5215ac144303cb414c8ce8562f5e5b304f1fdec8d2c29ceb14f
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import pytest
import numpy as np

from numpy.random import randn, normal
from numpy.testing import assert_equal
from numpy.testing.utils import assert_allclose

try:
    import scipy  # pylint: disable=W0611
except ImportError:
    HAS_SCIPY = False
else:
    HAS_SCIPY = True

try:
    import mpmath  # pylint: disable=W0611
except ImportError:
    HAS_MPMATH = False
else:
    HAS_MPMATH = True

from .. import funcs
from ... import units as u
from ...tests.helper import catch_warnings
from ...utils.misc import NumpyRNGContext


def test_median_absolute_deviation():
    with NumpyRNGContext(12345):
        # test that it runs
        randvar = randn(10000)
        mad = funcs.median_absolute_deviation(randvar)

        # test whether an array is returned if an axis is used
        randvar = randvar.reshape((10, 1000))
        mad = funcs.median_absolute_deviation(randvar, axis=1)
        assert len(mad) == 10
        assert mad.size < randvar.size
        mad = funcs.median_absolute_deviation(randvar, axis=0)
        assert len(mad) == 1000
        assert mad.size < randvar.size

        # Test some actual values in a 3 dimensional array
        x = np.arange(3 * 4 * 5)
        a = np.array([sum(x[:i + 1]) for i in range(len(x))]).reshape(3, 4, 5)
        mad = funcs.median_absolute_deviation(a)
        assert mad == 389.5
        mad = funcs.median_absolute_deviation(a, axis=0)
        assert_allclose(mad, [[210., 230., 250., 270., 290.],
                              [310., 330., 350., 370., 390.],
                              [410., 430., 450., 470., 490.],
                              [510., 530., 550., 570., 590.]])
        mad = funcs.median_absolute_deviation(a, axis=1)
        assert_allclose(mad, [[27.5, 32.5, 37.5, 42.5, 47.5],
                              [127.5, 132.5, 137.5, 142.5, 147.5],
                              [227.5, 232.5, 237.5, 242.5, 247.5]])
        mad = funcs.median_absolute_deviation(a, axis=2)
        assert_allclose(mad, [[3., 8., 13., 18.],
                              [23., 28., 33., 38.],
                              [43., 48., 53., 58.]])


def test_median_absolute_deviation_masked():
    # Based on the changes introduced in #4658

    # normal masked arrays without masked values are handled like normal
    # numpy arrays
    array = np.ma.array([1, 2, 3])
    assert funcs.median_absolute_deviation(array) == 1

    # masked numpy arrays return something different (rank 0 masked array)
    # but one can still compare it without np.all!
    array = np.ma.array([1, 4, 3], mask=[0, 1, 0])
    assert funcs.median_absolute_deviation(array) == 1
    # Just cross check if that's identical to the function on the unmasked
    # values only
    assert funcs.median_absolute_deviation(array) == (
        funcs.median_absolute_deviation(array[~array.mask]))

    # Multidimensional masked array
    array = np.ma.array([[1, 4], [2, 2]], mask=[[1, 0], [0, 0]])
    funcs.median_absolute_deviation(array)
    assert funcs.median_absolute_deviation(array) == 0
    # Just to compare it with the data without mask:
    assert funcs.median_absolute_deviation(array.data) == 0.5

    # And check if they are also broadcasted correctly
    np.testing.assert_array_equal(
        funcs.median_absolute_deviation(array, axis=0).data, [0, 1])
    np.testing.assert_array_equal(
        funcs.median_absolute_deviation(array, axis=1).data, [0, 0])


def test_median_absolute_deviation_nans():
    array = np.array([[1, 4, 3, np.nan],
                      [2, 5, np.nan, 4]])
    assert_equal(funcs.median_absolute_deviation(array, func=np.nanmedian,
                                                 axis=1), [1, 1])

    array = np.ma.masked_invalid(array)
    assert funcs.median_absolute_deviation(array) == 1


def test_median_absolute_deviation_multidim_axis():
    array = np.ones((5, 4, 3)) * np.arange(5)[:, np.newaxis, np.newaxis]
    assert_equal(funcs.median_absolute_deviation(array, axis=(1, 2)),
                 np.zeros(5))
    assert_equal(funcs.median_absolute_deviation(
        array, axis=np.array([1, 2])), np.zeros(5))


def test_median_absolute_deviation_quantity():
    # Based on the changes introduced in #4658
    # Just a small test that this function accepts Quantities and returns a
    # quantity
    a = np.array([1, 16, 5]) * u.m
    mad = funcs.median_absolute_deviation(a)
    # Check for the correct unit and that the result is identical to the
    # result without units.
    assert mad.unit == a.unit
    assert mad.value == funcs.median_absolute_deviation(a.value)


@pytest.mark.skipif('not HAS_SCIPY')
def test_binom_conf_interval():
    # Test Wilson and Jeffreys interval for corner cases:
    # Corner cases: k = 0, k = n, conf = 0., conf = 1.
    n = 5
    k = [0, 4, 5]
    for conf in [0., 0.5, 1.]:
        res = funcs.binom_conf_interval(k, n, conf=conf, interval='wilson')
        assert ((res >= 0.) & (res <= 1.)).all()
        res = funcs.binom_conf_interval(k, n, conf=conf, interval='jeffreys')
        assert ((res >= 0.) & (res <= 1.)).all()

    # Test Jeffreys interval accuracy against table in Brown et al. (2001).
    # (See `binom_conf_interval` docstring for reference.)
    k = [0, 1, 2, 3, 4]
    n = 7
    conf = 0.95
    result = funcs.binom_conf_interval(k, n, conf=conf, interval='jeffreys')
    table = np.array([[0.000, 0.016, 0.065, 0.139, 0.234],
                      [0.292, 0.501, 0.648, 0.766, 0.861]])
    assert_allclose(result, table, atol=1.e-3, rtol=0.)

    # Test scalar version
    result = np.array([funcs.binom_conf_interval(kval, n, conf=conf,
                                                 interval='jeffreys')
                       for kval in k]).transpose()
    assert_allclose(result, table, atol=1.e-3, rtol=0.)

    # Test flat
    result = funcs.binom_conf_interval(k, n, conf=conf, interval='flat')
    table = np.array([[0., 0.03185, 0.08523, 0.15701, 0.24486],
                      [0.36941, 0.52650, 0.65085, 0.75513, 0.84298]])
    assert_allclose(result, table, atol=1.e-3, rtol=0.)

    # Test scalar version
    result = np.array([funcs.binom_conf_interval(kval, n, conf=conf,
                                                 interval='flat')
                       for kval in k]).transpose()
    assert_allclose(result, table, atol=1.e-3, rtol=0.)

    # Test Wald interval
    result = funcs.binom_conf_interval(0, 5, interval='wald')
    assert_allclose(result, 0.)  # conf interval is [0, 0] when k = 0
    result = funcs.binom_conf_interval(5, 5, interval='wald')
    assert_allclose(result, 1.)  # conf interval is [1, 1] when k = n
    result = funcs.binom_conf_interval(500, 1000, conf=0.68269,
                                       interval='wald')
    assert_allclose(result[0], 0.5 - 0.5 / np.sqrt(1000.))
    assert_allclose(result[1], 0.5 + 0.5 / np.sqrt(1000.))

    # Test shapes
    k = 3
    n = 7
    for interval in ['wald', 'wilson', 'jeffreys', 'flat']:
        result = funcs.binom_conf_interval(k, n, interval=interval)
        assert result.shape == (2,)

    k = np.array(k)
    for interval in ['wald', 'wilson', 'jeffreys', 'flat']:
        result = funcs.binom_conf_interval(k, n, interval=interval)
        assert result.shape == (2,)

    n = np.array(n)
    for interval in ['wald', 'wilson', 'jeffreys', 'flat']:
        result = funcs.binom_conf_interval(k, n, interval=interval)
        assert result.shape == (2,)

    k = np.array([1, 3, 5])
    for interval in ['wald', 'wilson', 'jeffreys', 'flat']:
        result = funcs.binom_conf_interval(k, n, interval=interval)
        assert result.shape == (2, 3)

    n = np.array([5, 5, 5])
    for interval in ['wald', 'wilson', 'jeffreys', 'flat']:
        result = funcs.binom_conf_interval(k, n, interval=interval)
        assert result.shape == (2, 3)


@pytest.mark.skipif('not HAS_SCIPY')
def test_binned_binom_proportion():
    # Check that it works.
    nbins = 20
    x = np.linspace(0., 10., 100)  # Guarantee an `x` in every bin.
    success = np.ones(len(x), dtype=bool)
    bin_ctr, bin_hw, p, perr = funcs.binned_binom_proportion(x, success,
                                                             bins=nbins)

    # Check shape of outputs
    assert bin_ctr.shape == (nbins,)
    assert bin_hw.shape == (nbins,)
    assert p.shape == (nbins,)
    assert perr.shape == (2, nbins)

    # Check that p is 1 in all bins, since success = True for all `x`.
    assert (p == 1.).all()

    # Check that p is 0 in all bins if success = False for all `x`.
    success[:] = False
    bin_ctr, bin_hw, p, perr = funcs.binned_binom_proportion(x, success,
                                                             bins=nbins)
    assert (p == 0.).all()


def test_signal_to_noise_oir_ccd():
    result = funcs.signal_to_noise_oir_ccd(1, 25, 0, 0, 0, 1)
    assert 5.0 == result
    # check to make sure gain works
    result = funcs.signal_to_noise_oir_ccd(1, 5, 0, 0, 0, 1, 5)
    assert 5.0 == result

    # now add in sky, dark current, and read noise
    # make sure the snr goes down
    result = funcs.signal_to_noise_oir_ccd(1, 25, 1, 0, 0, 1)
    assert result < 5.0
    result = funcs.signal_to_noise_oir_ccd(1, 25, 0, 1, 0, 1)
    assert result < 5.0
    result = funcs.signal_to_noise_oir_ccd(1, 25, 0, 0, 1, 1)
    assert result < 5.0

    # make sure snr increases with time
    result = funcs.signal_to_noise_oir_ccd(2, 25, 0, 0, 0, 1)
    assert result > 5.0


def test_bootstrap():
    bootarr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
    # test general bootstrapping
    answer = np.array([[7, 4, 8, 5, 7, 0, 3, 7, 8, 5],
                       [4, 8, 8, 3, 6, 5, 2, 8, 6, 2]])
    with NumpyRNGContext(42):
        assert_equal(answer, funcs.bootstrap(bootarr, 2))

    # test with a bootfunction
    with NumpyRNGContext(42):
        bootresult = np.mean(funcs.bootstrap(bootarr, 10000,
                                             bootfunc=np.mean))
        assert_allclose(np.mean(bootarr), bootresult, atol=0.01)


@pytest.mark.skipif('not HAS_SCIPY')
def test_bootstrap_multiple_outputs():
    from scipy.stats import spearmanr

    # test a bootfunc with several output values
    # return just bootstrapping with one output from bootfunc
    with NumpyRNGContext(42):
        bootarr = np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9, 0],
                            [4, 8, 8, 3, 6, 5, 2, 8, 6, 2]]).T
        answer = np.array((0.19425, 0.02094))

        def bootfunc(x):
            return spearmanr(x)[0]

        bootresult = funcs.bootstrap(bootarr, 2, bootfunc=bootfunc)
        assert_allclose(answer, bootresult, atol=1e-3)

    # test a bootfunc with several output values
    # return just bootstrapping with the second output from bootfunc
    with NumpyRNGContext(42):
        bootarr = np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9, 0],
                            [4, 8, 8, 3, 6, 5, 2, 8, 6, 2]]).T
        answer = np.array((0.5907, 0.9541))

        def bootfunc(x):
            return spearmanr(x)[1]

        bootresult = funcs.bootstrap(bootarr, 2, bootfunc=bootfunc)
        assert_allclose(answer, bootresult, atol=1e-3)

    # return just bootstrapping with two outputs from bootfunc
    with NumpyRNGContext(42):
        answer = np.array(((0.1942, 0.5907),
                           (0.0209, 0.9541),
                           (0.4286, 0.2165)))

        def bootfunc(x):
            return spearmanr(x)

        bootresult = funcs.bootstrap(bootarr, 3, bootfunc=bootfunc)
        assert bootresult.shape == (3, 2)
        assert_allclose(answer, bootresult, atol=1e-3)


def test_mad_std():
    with NumpyRNGContext(12345):
        data = normal(5, 2, size=(100, 100))
        assert_allclose(funcs.mad_std(data), 2.0, rtol=0.05)


@pytest.mark.xfail()
def test_mad_std_scalar_return():
    with NumpyRNGContext(12345):
        data = normal(5, 2, size=(10, 10))
        # make a masked array with no masked points
        data = np.ma.masked_where(np.isnan(data), data)
        rslt = funcs.mad_std(data)
        # want a scalar result, NOT a masked array
        assert np.isscalar(rslt)

        data[5, 5] = np.nan
        rslt = funcs.mad_std(data, ignore_nan=True)
        assert np.isscalar(rslt)
        with catch_warnings():
            rslt = funcs.mad_std(data)
            assert np.isscalar(rslt)
            assert not np.isnan(rslt)


def test_mad_std_warns():
    with NumpyRNGContext(12345):
        data = normal(5, 2, size=(10, 10))
        data[5, 5] = np.nan
        with catch_warnings() as warns:
            rslt = funcs.mad_std(data, ignore_nan=False)
            assert np.isnan(rslt)


def test_mad_std_withnan():
    with NumpyRNGContext(12345):
        data = np.empty([102, 102])
        data[:] = np.nan
        data[1:-1, 1:-1] = normal(5, 2, size=(100, 100))
        assert_allclose(funcs.mad_std(data, ignore_nan=True), 2.0, rtol=0.05)

    assert np.isnan(funcs.mad_std([1, 2, 3, 4, 5, np.nan]))
    assert_allclose(funcs.mad_std([1, 2, 3, 4, 5, np.nan], ignore_nan=True),
                    1.482602218505602)


def test_mad_std_with_axis():
    data = np.array([[1, 2, 3, 4],
                     [4, 3, 2, 1]])
    # results follow data symmetry
    result_axis0 = np.array([2.22390333, 0.74130111, 0.74130111,
                             2.22390333])
    result_axis1 = np.array([1.48260222, 1.48260222])
    assert_allclose(funcs.mad_std(data, axis=0), result_axis0)
    assert_allclose(funcs.mad_std(data, axis=1), result_axis1)


def test_mad_std_with_axis_and_nan():
    data = np.array([[1, 2, 3, 4, np.nan],
                     [4, 3, 2, 1, np.nan]])
    # results follow data symmetry
    result_axis0 = np.array([2.22390333, 0.74130111, 0.74130111,
                             2.22390333, np.nan])
    result_axis1 = np.array([1.48260222, 1.48260222])

    assert_allclose(funcs.mad_std(data, axis=0, ignore_nan=True),
                    result_axis0)
    assert_allclose(funcs.mad_std(data, axis=1, ignore_nan=True),
                    result_axis1)


def test_mad_std_with_axis_and_nan_array_type():
    # mad_std should return a masked array if given one, and not otherwise
    data = np.array([[1, 2, 3, 4, np.nan],
                     [4, 3, 2, 1, np.nan]])

    result = funcs.mad_std(data, axis=0, ignore_nan=True)
    assert not np.ma.isMaskedArray(result)

    data = np.ma.masked_where(np.isnan(data), data)
    result = funcs.mad_std(data, axis=0, ignore_nan=True)
    assert np.ma.isMaskedArray(result)


def test_gaussian_fwhm_to_sigma():
    fwhm = (2.0 * np.sqrt(2.0 * np.log(2.0)))
    assert_allclose(funcs.gaussian_fwhm_to_sigma * fwhm, 1.0, rtol=1.0e-6)


def test_gaussian_sigma_to_fwhm():
    sigma = 1.0 / (2.0 * np.sqrt(2.0 * np.log(2.0)))
    assert_allclose(funcs.gaussian_sigma_to_fwhm * sigma, 1.0, rtol=1.0e-6)


def test_gaussian_sigma_to_fwhm_to_sigma():
    assert_allclose(funcs.gaussian_fwhm_to_sigma *
                    funcs.gaussian_sigma_to_fwhm, 1.0)


def test_poisson_conf_interval_rootn():
    assert_allclose(funcs.poisson_conf_interval(16, interval='root-n'),
                    (12, 20))


@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('interval', ['root-n-0', 'pearson',
                                      'sherpagehrels',
                                      'frequentist-confidence'])
def test_poisson_conf_large(interval):
    n = 100
    assert_allclose(funcs.poisson_conf_interval(n, interval='root-n'),
                    funcs.poisson_conf_interval(n, interval=interval),
                    rtol=2e-2)


def test_poisson_conf_array_rootn0_zero():
    n = np.zeros((3, 4, 5))
    assert_allclose(funcs.poisson_conf_interval(n, interval='root-n-0'),
                    funcs.poisson_conf_interval(
                        n[0, 0, 0],
                        interval='root-n-0')[:, None, None, None]
                    * np.ones_like(n))

    assert not np.any(np.isnan(
        funcs.poisson_conf_interval(n, interval='root-n-0')))


@pytest.mark.skipif('not HAS_SCIPY')
def test_poisson_conf_array_frequentist_confidence_zero():
    n = np.zeros((3, 4, 5))
    assert_allclose(
        funcs.poisson_conf_interval(n, interval='frequentist-confidence'),
        funcs.poisson_conf_interval(
            n[0, 0, 0],
            interval='frequentist-confidence')[:, None, None, None]
        * np.ones_like(n))

    assert not np.any(np.isnan(
        funcs.poisson_conf_interval(n, interval='root-n-0')))


def test_poisson_conf_list_rootn0_zero():
    n = [0, 0, 0]
    assert_allclose(funcs.poisson_conf_interval(n, interval='root-n-0'),
                    [[0, 0, 0], [1, 1, 1]])

    assert not np.any(np.isnan(
        funcs.poisson_conf_interval(n, interval='root-n-0')))


def test_poisson_conf_array_rootn0():
    n = 7 * np.ones((3, 4, 5))
    assert_allclose(funcs.poisson_conf_interval(n, interval='root-n-0'),
                    funcs.poisson_conf_interval(
                        n[0, 0, 0],
                        interval='root-n-0')[:, None, None, None]
                    * np.ones_like(n))

    n[1, 2, 3] = 0
    assert not np.any(np.isnan(
        funcs.poisson_conf_interval(n, interval='root-n-0')))


@pytest.mark.skipif('not HAS_SCIPY')
def test_poisson_conf_array_fc():
    n = 7 * np.ones((3, 4, 5))
    assert_allclose(
        funcs.poisson_conf_interval(n, interval='frequentist-confidence'),
        funcs.poisson_conf_interval(
            n[0, 0, 0],
            interval='frequentist-confidence')[:, None, None, None]
        * np.ones_like(n))

    n[1, 2, 3] = 0
    assert not np.any(np.isnan(
        funcs.poisson_conf_interval(n, interval='frequentist-confidence')))


@pytest.mark.skipif('not HAS_SCIPY')
def test_poisson_conf_frequentist_confidence_gehrels():
    """Test intervals against those published in Gehrels 1986"""
    nlh = np.array([(0, 0, 1.841),
                    (1, 0.173, 3.300),
                    (2, 0.708, 4.638),
                    (3, 1.367, 5.918),
                    (4, 2.086, 7.163),
                    (5, 2.840, 8.382),
                    (6, 3.620, 9.584),
                    (7, 4.419, 10.77),
                    (8, 5.232, 11.95),
                    (9, 6.057, 13.11),
                    (10, 6.891, 14.27),
                    ])
    assert_allclose(
        funcs.poisson_conf_interval(nlh[:, 0],
                                    interval='frequentist-confidence'),
        nlh[:, 1:].T, rtol=0.001, atol=0.001)


@pytest.mark.skipif('not HAS_SCIPY')
def test_poisson_conf_frequentist_confidence_gehrels_2sigma():
    """Test intervals against those published in Gehrels 1986

    Note: I think there's a typo (transposition of digits) in Gehrels 1986,
    specifically for the two-sigma lower limit for 3 events; they claim
    0.569 but this function returns 0.59623...
    """
    nlh = np.array([(0, 2, 0, 3.783),
                    (1, 2, 2.30e-2, 5.683),
                    (2, 2, 0.230, 7.348),
                    (3, 2, 0.596, 8.902),
                    (4, 2, 1.058, 10.39),
                    (5, 2, 1.583, 11.82),
                    (6, 2, 2.153, 13.22),
                    (7, 2, 2.758, 14.59),
                    (8, 2, 3.391, 15.94),
                    (9, 2, 4.046, 17.27),
                    (10, 2, 4.719, 18.58)])
    assert_allclose(
        funcs.poisson_conf_interval(nlh[:, 0], sigma=2,
                                    interval='frequentist-confidence').T,
        nlh[:, 2:], rtol=0.01)


@pytest.mark.skipif('not HAS_SCIPY')
def test_poisson_conf_frequentist_confidence_gehrels_3sigma():
    """Test intervals against those published in Gehrels 1986"""
    nlh = np.array([(0, 3, 0, 6.608),
                    (1, 3, 1.35e-3, 8.900),
                    (2, 3, 5.29e-2, 10.87),
                    (3, 3, 0.212, 12.68),
                    (4, 3, 0.465, 14.39),
                    (5, 3, 0.792, 16.03),
                    (6, 3, 1.175, 17.62),
                    (7, 3, 1.603, 19.17),
                    (8, 3, 2.068, 20.69),
                    (9, 3, 2.563, 22.18),
                    (10, 3, 3.084, 23.64),
                    ])
    assert_allclose(
        funcs.poisson_conf_interval(nlh[:, 0], sigma=3,
                                    interval='frequentist-confidence').T,
        nlh[:, 2:], rtol=0.01, verbose=True)


@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('n', [0, 1, 2, 3, 10, 20, 100])
def test_poisson_conf_gehrels86(n):
    assert_allclose(
        funcs.poisson_conf_interval(n, interval='sherpagehrels')[1],
        funcs.poisson_conf_interval(n, interval='frequentist-confidence')[1],
        rtol=0.02)


@pytest.mark.skipif('not HAS_SCIPY')
def test_scipy_poisson_limit():
    '''Test that the lower-level routine gives the same numbers.

    Test numbers are from tables 1 and 3 in Kraft, Burrows and Nousek in
    `ApJ 374, 344 (1991) <http://adsabs.harvard.edu/abs/1991ApJ...374..344K>`_
    '''
    assert_allclose(funcs._scipy_kraft_burrows_nousek(5., 2.5, .99),
                    (0, 10.67), rtol=1e-3)
    conf = funcs.poisson_conf_interval([5., 6.], 'kraft-burrows-nousek',
                                       background=[2.5, 2.],
                                       conflevel=[.99, .9])
    assert_allclose(conf[:, 0], (0, 10.67), rtol=1e-3)
    assert_allclose(conf[:, 1], (0.81, 8.99), rtol=5e-3)


@pytest.mark.skipif('not HAS_MPMATH')
def test_mpmath_poisson_limit():
    assert_allclose(funcs._mpmath_kraft_burrows_nousek(6., 2., .9),
                    (0.81, 8.99), rtol=5e-3)
    assert_allclose(funcs._mpmath_kraft_burrows_nousek(5., 2.5, .99),
                    (0, 10.67), rtol=1e-3)


@pytest.mark.skipif('not HAS_SCIPY')
def test_poisson_conf_value_errors():
    with pytest.raises(ValueError) as e:
        funcs.poisson_conf_interval([5, 6], 'root-n', sigma=2)
    assert 'Only sigma=1 supported' in str(e.value)

    with pytest.raises(ValueError) as e:
        funcs.poisson_conf_interval([5, 6], 'pearson', background=[2.5, 2.])
    assert 'background not supported' in str(e.value)

    with pytest.raises(ValueError) as e:
        funcs.poisson_conf_interval([5, 6], 'sherpagehrels',
                                    conflevel=[2.5, 2.])
    assert 'conflevel not supported' in str(e.value)

    with pytest.raises(ValueError) as e:
        funcs.poisson_conf_interval(1, 'foo')
    assert 'Invalid method' in str(e.value)


@pytest.mark.skipif('not HAS_SCIPY')
def test_poisson_conf_kbn_value_errors():
    with pytest.raises(ValueError) as e:
        funcs.poisson_conf_interval(5., 'kraft-burrows-nousek',
                                    background=2.5, conflevel=99)
    assert 'number between 0 and 1' in str(e.value)

    with pytest.raises(ValueError) as e:
        funcs.poisson_conf_interval(5., 'kraft-burrows-nousek',
                                    background=2.5)
    assert 'Set conflevel for method' in str(e.value)

    with pytest.raises(ValueError) as e:
        funcs.poisson_conf_interval(5., 'kraft-burrows-nousek',
                                    background=-2.5, conflevel=.99)
    assert 'Background must be' in str(e.value)


@pytest.mark.skipif('HAS_SCIPY or HAS_MPMATH')
def test_poisson_limit_nodependencies():
    with pytest.raises(ImportError):
        funcs.poisson_conf_interval(20., interval='kraft-burrows-nousek',
                                    background=10., conflevel=.95)


@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('N', [10, 100, 1000, 10000])
def test_uniform(N):
    with NumpyRNGContext(12345):
        assert funcs.kuiper(np.random.random(N))[1] > 0.01


@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('N,M', [(100, 100),
                                 (20, 100),
                                 (100, 20),
                                 (10, 20),
                                 (5, 5),
                                 (1000, 100)])
def test_kuiper_two_uniform(N, M):
    with NumpyRNGContext(12345):
        assert funcs.kuiper_two(np.random.random(N),
                                np.random.random(M))[1] > 0.01


@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('N,M', [(100, 100),
                                 (20, 100),
                                 (100, 20),
                                 (10, 20),
                                 (5, 5),
                                 (1000, 100)])
def test_kuiper_two_nonuniform(N, M):
    with NumpyRNGContext(12345):
        assert funcs.kuiper_two(np.random.random(N)**2,
                                np.random.random(M)**2)[1] > 0.01


@pytest.mark.skipif('not HAS_SCIPY')
def test_detect_kuiper_two_different():
    with NumpyRNGContext(12345):
        D, f = funcs.kuiper_two(np.random.random(500) * 0.5,
                                np.random.random(500))
        assert f < 0.01


@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('N,M', [(100, 100),
                                 (20, 100),
                                 (100, 20),
                                 (10, 20),
                                 (5, 5),
                                 (1000, 100)])
def test_fpp_kuiper_two(N, M):
    with NumpyRNGContext(12345):
        R = 100
        fpp = 0.05
        fps = 0
        for i in range(R):
            D, f = funcs.kuiper_two(np.random.random(N),
                                    np.random.random(M))
            if f < fpp:
                fps += 1
        assert scipy.stats.binom(R, fpp).sf(fps - 1) > 0.005
        assert scipy.stats.binom(R, fpp).cdf(fps - 1) > 0.005


@pytest.mark.skipif('not HAS_SCIPY')
def test_histogram():
    with NumpyRNGContext(1234):
        a, b = 0.3, 3.14
        s = np.random.uniform(a, b, 10000) % 1

        b, w = funcs.fold_intervals([(a, b, 1. / (b - a))])
        h = funcs.histogram_intervals(16, b, w)
        nn, bb = np.histogram(s, bins=len(h), range=(0, 1))

        uu = np.sqrt(nn)
        nn, uu = len(h) * nn / h / len(s), len(h) * uu / h / len(s)

        c2 = np.sum(((nn - 1) / uu)**2)

        assert scipy.stats.chi2(len(h)).cdf(c2) > 0.01
        assert scipy.stats.chi2(len(h)).sf(c2) > 0.01


@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize("ii,rr", [
    ((4, (0, 1), (1,)), (1, 1, 1, 1)),
    ((2, (0, 1), (1,)), (1, 1)),
    ((4, (0, 0.5, 1), (1, 1)), (1, 1, 1, 1)),
    ((4, (0, 0.5, 1), (1, 2)), (1, 1, 2, 2)),
    ((3, (0, 0.5, 1), (1, 2)), (1, 1.5, 2)),
])
def test_histogram_intervals_known(ii, rr):
    with NumpyRNGContext(1234):
        assert_allclose(funcs.histogram_intervals(*ii), rr)


@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('N,m,p', [(100, 10000, 0.01),
                                   (300, 10000, 0.001),
                                   (10, 10000, 0.001),
                                   ])
def test_uniform_binomial(N, m, p):
    """Check that the false positive probability is right

    In particular, run m trials with N uniformly-distributed photons and
    check that the number of false positives is consistent with a binomial
    distribution. The more trials, the tighter the bounds, but the longer
    the runtime.
    """
    with NumpyRNGContext(1234):
        fpps = [funcs.kuiper(np.random.random(N))[1]
                for i in range(m)]
        assert (scipy.stats.binom(n=m, p=p).ppf(0.01) <
                len([fpp for fpp in fpps if fpp < p]) <
                scipy.stats.binom(n=m, p=p).ppf(0.99))
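
def _mad_std_sketch():
    # Illustrative sketch, not part of the original test module: mad_std is
    # the median absolute deviation rescaled to estimate a Gaussian standard
    # deviation, sigma ~= 1.482602218505602 * MAD (the constant asserted in
    # test_mad_std_withnan above).
    import numpy as np

    data = np.array([1., 2., 3., 4., 5.])
    mad = np.median(np.abs(data - np.median(data)))
    return 1.482602218505602 * mad  # should match funcs.mad_std(data)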
cf8b6ab9acdde18350685635ba735a919cb24cd15b6ebfcd262e9ff919ddd04d
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import pytest

import numpy as np
from numpy.random import randn, normal
from numpy.testing import assert_equal
from numpy.testing.utils import assert_allclose

from ..biweight import (biweight_location, biweight_scale,
                        biweight_midvariance, biweight_midcovariance,
                        biweight_midcorrelation)
from ...tests.helper import catch_warnings
from ...utils.misc import NumpyRNGContext


def test_biweight_location():
    with NumpyRNGContext(12345):
        # test that it runs
        randvar = randn(10000)
        cbl = biweight_location(randvar)
        assert abs(cbl - 0) < 1e-2


def test_biweight_location_small():
    cbl = biweight_location([1, 3, 5, 500, 2])
    assert abs(cbl - 2.745) < 1e-3


def test_biweight_location_axis():
    """Test a 2D array with the axis keyword."""
    with NumpyRNGContext(12345):
        ny = 100
        nx = 200
        data = normal(5, 2, (ny, nx))

        bw = biweight_location(data, axis=0)
        bwi = []
        for i in range(nx):
            bwi.append(biweight_location(data[:, i]))
        bwi = np.array(bwi)
        assert_allclose(bw, bwi)

        bw = biweight_location(data, axis=1)
        bwi = []
        for i in range(ny):
            bwi.append(biweight_location(data[i, :]))
        bwi = np.array(bwi)
        assert_allclose(bw, bwi)


def test_biweight_location_axis_3d():
    """Test a 3D array with the axis keyword."""
    with NumpyRNGContext(12345):
        nz = 3
        ny = 4
        nx = 5
        data = normal(5, 2, (nz, ny, nx))
        bw = biweight_location(data, axis=0)
        assert bw.shape == (ny, nx)

        y = 0
        bwi = []
        for i in range(nx):
            bwi.append(biweight_location(data[:, y, i]))
        bwi = np.array(bwi)
        assert_allclose(bw[y], bwi)


def test_biweight_scale():
    # NOTE: biweight_scale is covered by biweight_midvariance tests
    data = [1, 3, 5, 500, 2]
    scl = biweight_scale(data)
    var = biweight_midvariance(data)
    assert_allclose(scl, np.sqrt(var))


def test_biweight_midvariance():
    with NumpyRNGContext(12345):
        # test that it runs
        randvar = randn(10000)
        var = biweight_midvariance(randvar)
        assert_allclose(var, 1.0, rtol=0.02)


def test_biweight_midvariance_small():
    data = [1, 3, 5, 500, 2]
    var = biweight_midvariance(data)
    assert_allclose(var, 2.9238456)  # verified with R

    var = biweight_midvariance(data, modify_sample_size=True)
    assert_allclose(var, 2.3390765)


def test_biweight_midvariance_5127():
    # test a regression introduced in #5127
    rand = np.random.RandomState(12345)
    data = rand.normal(loc=0., scale=20., size=(100, 100))
    var = biweight_midvariance(data)
    assert_allclose(var, 406.86938710817344)  # verified with R


def test_biweight_midvariance_axis():
    """Test a 2D array with the axis keyword."""
    with NumpyRNGContext(12345):
        ny = 100
        nx = 200
        data = normal(5, 2, (ny, nx))

        bw = biweight_midvariance(data, axis=0)
        bwi = []
        for i in range(nx):
            bwi.append(biweight_midvariance(data[:, i]))
        bwi = np.array(bwi)
        assert_allclose(bw, bwi)

        bw = biweight_midvariance(data, axis=1)
        bwi = []
        for i in range(ny):
            bwi.append(biweight_midvariance(data[i, :]))
        bwi = np.array(bwi)
        assert_allclose(bw, bwi)


def test_biweight_midvariance_axis_3d():
    """Test a 3D array with the axis keyword."""
    with NumpyRNGContext(12345):
        nz = 3
        ny = 4
        nx = 5
        data = normal(5, 2, (nz, ny, nx))
        bw = biweight_midvariance(data, axis=0)
        assert bw.shape == (ny, nx)

        y = 0
        bwi = []
        for i in range(nx):
            bwi.append(biweight_midvariance(data[:, y, i]))
        bwi = np.array(bwi)
        assert_allclose(bw[y], bwi)


def test_biweight_midcovariance_1d():
    d = [0, 1, 2]
    cov = biweight_midcovariance(d)
    var = biweight_midvariance(d)
    assert_allclose(cov, [[var]])


def test_biweight_midcovariance_2d():
    d = [[0, 1, 2], [2, 1, 0]]
    cov = biweight_midcovariance(d)
    val = 0.70121809
    assert_allclose(cov, [[val, -val], [-val, val]])  # verified with R

    d = [[5, 1, 10], [500, 5, 2]]
    cov = biweight_midcovariance(d)
    assert_allclose(cov, [[14.54159077, -7.79026256],  # verified with R
                          [-7.79026256, 6.92087252]])

    cov = biweight_midcovariance(d, modify_sample_size=True)
    assert_allclose(cov, [[14.54159077, -5.19350838],
                          [-5.19350838, 4.61391501]])


def test_biweight_midcovariance_midvariance():
    """
    Test that biweight_midcovariance diagonal elements agree with
    biweight_midvariance.
    """
    rng = np.random.RandomState(1)
    d = rng.normal(0, 2, size=(100, 3))
    cov = biweight_midcovariance(d)
    var = [biweight_midvariance(a) for a in d]
    assert_allclose(cov.diagonal(), var)

    cov2 = biweight_midcovariance(d, modify_sample_size=True)
    var2 = [biweight_midvariance(a, modify_sample_size=True)
            for a in d]
    assert_allclose(cov2.diagonal(), var2)


def test_midcovariance_shape():
    """
    Test that biweight_midcovariance raises error with a 3D array.
    """
    d = np.ones(27).reshape(3, 3, 3)
    with pytest.raises(ValueError) as e:
        biweight_midcovariance(d)
    assert 'The input array must be 2D or 1D.' in str(e.value)


def test_midcovariance_M_shape():
    """
    Test that biweight_midcovariance raises error when M is not a scalar
    or 1D array.
    """
    d = [0, 1, 2]
    M = [[0, 1], [2, 3]]
    with pytest.raises(ValueError) as e:
        biweight_midcovariance(d, M=M)
    assert 'M must be a scalar or 1D array.' in str(e.value)


def test_biweight_midcovariance_symmetric():
    """
    Regression test to ensure that midcovariance matrix is symmetric when
    ``modify_sample_size=True`` (see #5972).
    """
    rng = np.random.RandomState(1)
    d = rng.gamma(2, 2, size=(3, 500))
    cov = biweight_midcovariance(d)
    assert_equal(cov, cov.T)

    cov = biweight_midcovariance(d, modify_sample_size=True)
    assert_equal(cov, cov.T)


def test_biweight_midcorrelation():
    x = [0, 1, 2]
    y = [2, 1, 0]
    assert_allclose(biweight_midcorrelation(x, x), 1.0)
    assert_allclose(biweight_midcorrelation(x, y), -1.0)

    x = [5, 1, 10, 12.4, 13.2]
    y = [500, 5, 2, 7.1, 0.9]
    # verified with R
    assert_allclose(biweight_midcorrelation(x, y), -0.14411038976763313)


def test_biweight_midcorrelation_inputs():
    a1 = np.ones((3, 3))
    a2 = np.ones(5)
    a3 = np.ones(7)

    with pytest.raises(ValueError) as e:
        biweight_midcorrelation(a1, a2)
    assert 'x must be a 1D array.' in str(e.value)

    with pytest.raises(ValueError) as e:
        biweight_midcorrelation(a2, a1)
    assert 'y must be a 1D array.' in str(e.value)

    with pytest.raises(ValueError) as e:
        biweight_midcorrelation(a2, a3)
    assert 'x and y must have the same shape.' in str(e.value)


def test_biweight_32bit_runtime_warnings():
    """Regression test for #6905."""
    with NumpyRNGContext(12345):
        data = np.random.random(100).astype(np.float32)
        data[50] = 30000.

        with catch_warnings(RuntimeWarning) as warning_lines:
            biweight_scale(data)
            assert len(warning_lines) == 0

        with catch_warnings(RuntimeWarning) as warning_lines:
            biweight_midvariance(data)
            assert len(warning_lines) == 0
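
def _biweight_location_sketch():
    # Illustrative sketch, not part of the original test module: Tukey's
    # biweight location written out under the standard assumptions (tuning
    # constant c=6, MAD-based u).  Reproduces the 2.745 expected by
    # test_biweight_location_small above.
    import numpy as np

    x = np.array([1., 3., 5., 500., 2.])
    c = 6.0
    M = np.median(x)
    u = (x - M) / (c * np.median(np.abs(x - M)))
    w = (1 - u ** 2) ** 2
    w[np.abs(u) >= 1] = 0  # points beyond the cutoff get zero weight
    return M + np.sum(w * (x - M)) / np.sum(w)  # ~2.745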
3effe8113df40811fb015b372108c00e1f45cb655f6878a2c47918332ca29506
""" Utilities for computing periodogram statistics. This is an internal module; users should access this functionality via the ``false_alarm_probability`` and ``false_alarm_level`` methods of the ``astropy.stats.LombScargle`` API. """ from functools import wraps import numpy as np def _weighted_sum(val, dy): if dy is not None: return (val / dy ** 2).sum() else: return val.sum() def _weighted_mean(val, dy): if dy is None: return val.mean() else: return _weighted_sum(val, dy) / _weighted_sum(np.ones_like(val), dy) def _weighted_var(val, dy): return _weighted_mean(val ** 2, dy) - _weighted_mean(val, dy) ** 2 def _gamma(N): from scipy.special import gammaln # Note: this is closely approximated by (1 - 0.75 / N) for large N return np.sqrt(2 / N) * np.exp(gammaln(N / 2) - gammaln((N - 1) / 2)) def _log_gamma(N): from scipy.special import gammaln return 0.5 * np.log(2 / N) + gammaln(N / 2) - gammaln((N - 1) / 2) def vectorize_first_argument(func): @wraps(func) def new_func(x, *args, **kwargs): x = np.asarray(x) return np.array([func(xi, *args, **kwargs) for xi in x.flat]).reshape(x.shape) return new_func def pdf_single(z, N, normalization, dH=1, dK=3): """Probability density function for Lomb-Scargle periodogram Compute the expected probability density function of the periodogram for the null hypothesis - i.e. data consisting of Gaussian noise. Parameters ---------- z : array-like The periodogram value. N : int The number of data points from which the periodogram was computed. normalization : {'standard', 'model', 'log', 'psd'} The periodogram normalization. dH, dK : integers, optional The number of parameters in the null hypothesis and the model. Returns ------- pdf : np.ndarray The expected probability density function. Notes ----- For normalization='psd', the distribution can only be computed for periodograms constructed with errors specified. All expressions used here are adapted from Table 1 of Baluev 2008 [1]_. References ---------- .. [1] Baluev, R.V. MNRAS 385, 1279 (2008) """ z = np.asarray(z) if dK - dH != 2: raise NotImplementedError("Degrees of freedom != 2") Nk = N - dK if normalization == 'psd': return np.exp(-z) elif normalization == 'standard': return 0.5 * Nk * (1 - z) ** (0.5 * Nk - 1) elif normalization == 'model': return 0.5 * Nk * (1 + z) ** (-0.5 * Nk - 1) elif normalization == 'log': return 0.5 * Nk * np.exp(-0.5 * Nk * z) else: raise ValueError("normalization='{0}' is not recognized" "".format(normalization)) def fap_single(z, N, normalization, dH=1, dK=3): """Single-frequency false alarm probability for the Lomb-Scargle periodogram This is equal to 1 - cdf, where cdf is the cumulative distribution. The single-frequency false alarm probability should not be confused with the false alarm probability for the largest peak. Parameters ---------- z : array-like The periodogram value. N : int The number of data points from which the periodogram was computed. normalization : {'standard', 'model', 'log', 'psd'} The periodogram normalization. dH, dK : integers, optional The number of parameters in the null hypothesis and the model. Returns ------- false_alarm_probability : np.ndarray The single-frequency false alarm probability. Notes ----- For normalization='psd', the distribution can only be computed for periodograms constructed with errors specified. All expressions used here are adapted from Table 1 of Baluev 2008 [1]_. References ---------- .. [1] Baluev, R.V. 
MNRAS 385, 1279 (2008) """ z = np.asarray(z) if dK - dH != 2: raise NotImplementedError("Degrees of freedom != 2") Nk = N - dK if normalization == 'psd': return np.exp(-z) elif normalization == 'standard': return (1 - z) ** (0.5 * Nk) elif normalization == 'model': return (1 + z) ** (-0.5 * Nk) elif normalization == 'log': return np.exp(-0.5 * Nk * z) else: raise ValueError("normalization='{0}' is not recognized" "".format(normalization)) def inv_fap_single(fap, N, normalization, dH=1, dK=3): """Single-frequency inverse false alarm probability This function computes the periodogram value associated with the specified single-frequency false alarm probability. This should not be confused with the false alarm level of the largest peak. Parameters ---------- fap : array-like The false alarm probability. N : int The number of data points from which the periodogram was computed. normalization : {'standard', 'model', 'log', 'psd'} The periodogram normalization. dH, dK : integers, optional The number of parameters in the null hypothesis and the model. Returns ------- z : np.ndarray The periodogram power corresponding to the single-peak false alarm probability. Notes ----- For normalization='psd', the distribution can only be computed for periodograms constructed with errors specified. All expressions used here are adapted from Table 1 of Baluev 2008 [1]_. References ---------- .. [1] Baluev, R.V. MNRAS 385, 1279 (2008) """ fap = np.asarray(fap) if dK - dH != 2: raise NotImplementedError("Degrees of freedom != 2") Nk = N - dK if normalization == 'psd': return -np.log(fap) elif normalization == 'standard': return 1 - fap ** (2 / Nk) elif normalization == 'model': return -1 + fap ** (-2 / Nk) elif normalization == 'log': return -2 / Nk * np.log(fap) else: raise ValueError("normalization='{0}' is not recognized" "".format(normalization)) def cdf_single(z, N, normalization, dH=1, dK=3): """Cumulative distribution for the Lomb-Scargle periodogram Compute the expected cumulative distribution of the periodogram for the null hypothesis - i.e. data consisting of Gaussian noise. Parameters ---------- z : array-like The periodogram value. N : int The number of data points from which the periodogram was computed. normalization : {'standard', 'model', 'log', 'psd'} The periodogram normalization. dH, dK : integers, optional The number of parameters in the null hypothesis and the model. Returns ------- cdf : np.ndarray The expected cumulative distribution function. Notes ----- For normalization='psd', the distribution can only be computed for periodograms constructed with errors specified. All expressions used here are adapted from Table 1 of Baluev 2008 [1]_. References ---------- .. [1] Baluev, R.V. 
MNRAS 385, 1279 (2008) """ return 1 - fap_single(z, N, normalization=normalization, dH=dH, dK=dK) def tau_davies(Z, fmax, t, y, dy, normalization='standard', dH=1, dK=3): """tau factor for estimating Davies bound (Baluev 2008, Table 1)""" N = len(t) NH = N - dH # DOF for null hypothesis NK = N - dK # DOF for periodic hypothesis Dt = _weighted_var(t, dy) Teff = np.sqrt(4 * np.pi * Dt) # Effective baseline W = fmax * Teff Z = np.asarray(Z) if normalization == 'psd': # 'psd' normalization is same as Baluev's z return W * np.exp(-Z) * np.sqrt(Z) elif normalization == 'standard': # 'standard' normalization is Z = 2/NH * z_1 return (_gamma(NH) * W * (1 - Z) ** (0.5 * (NK - 1)) * np.sqrt(0.5 * NH * Z)) elif normalization == 'model': # 'model' normalization is Z = 2/NK * z_2 return (_gamma(NK) * W * (1 + Z) ** (-0.5 * NK) * np.sqrt(0.5 * NK * Z)) elif normalization == 'log': # 'log' normalization is Z = 2/NK * z_3 return (_gamma(NK) * W * np.exp(-0.5 * Z * (NK - 0.5)) * np.sqrt(NK * np.sinh(0.5 * Z))) else: raise NotImplementedError("normalization={0}".format(normalization)) def fap_naive(Z, fmax, t, y, dy, normalization='standard'): """False Alarm Probability based on estimated number of indep frequencies""" N = len(t) T = max(t) - min(t) N_eff = fmax * T fap_s = fap_single(Z, N, normalization=normalization) # result is 1 - (1 - fap_s) ** N_eff # this is much more precise for small Z / large N return -np.expm1(N_eff * np.log1p(-fap_s)) def inv_fap_naive(fap, fmax, t, y, dy, normalization='standard'): """Inverse FAP based on estimated number of indep frequencies""" fap = np.asarray(fap) N = len(t) T = max(t) - min(t) N_eff = fmax * T #fap_s = 1 - (1 - fap) ** (1 / N_eff) fap_s = -np.expm1(np.log(1 - fap) / N_eff) return inv_fap_single(fap_s, N, normalization) def fap_davies(Z, fmax, t, y, dy, normalization='standard'): """Davies upper-bound to the false alarm probability (Eqn 5 of Baluev 2008) """ N = len(t) fap_s = fap_single(Z, N, normalization=normalization) tau = tau_davies(Z, fmax, t, y, dy, normalization=normalization) return fap_s + tau @vectorize_first_argument def inv_fap_davies(p, fmax, t, y, dy, normalization='standard'): """Inverse of the davies upper-bound""" from scipy import optimize args = (fmax, t, y, dy, normalization) z0 = inv_fap_naive(p, *args) func = lambda z, *args: fap_davies(z, *args) - p res = optimize.root(func, z0, args=args, method='lm') if not res.success: raise ValueError('inv_fap_baluev did not converge for p={0}'.format(p)) return res.x def fap_baluev(Z, fmax, t, y, dy, normalization='standard'): """Alias-free approximation to false alarm probability (Eqn 6 of Baluev 2008) """ fap_s = fap_single(Z, len(t), normalization) tau = tau_davies(Z, fmax, t, y, dy, normalization=normalization) # result is 1 - (1 - fap_s) * np.exp(-tau) # this is much more precise for small numbers return -np.expm1(-tau) + fap_s * np.exp(-tau) @vectorize_first_argument def inv_fap_baluev(p, fmax, t, y, dy, normalization='standard'): """Inverse of the Baluev alias-free approximation""" from scipy import optimize args = (fmax, t, y, dy, normalization) z0 = inv_fap_naive(p, *args) func = lambda z, *args: fap_baluev(z, *args) - p res = optimize.root(func, z0, args=args, method='lm') if not res.success: raise ValueError('inv_fap_baluev did not converge for p={0}'.format(p)) return res.x def _bootstrap_max(t, y, dy, fmax, normalization, random_seed): """Generate a sequence of bootstrap estimates of the max""" from .core import LombScargle rng = np.random.RandomState(random_seed) while True: s = 
rng.randint(0, len(y), len(y)) # sample with replacement ls_boot = LombScargle(t, y[s], dy if dy is None else dy[s], normalization=normalization) freq, power = ls_boot.autopower(maximum_frequency=fmax) yield power.max() def fap_bootstrap(Z, fmax, t, y, dy, normalization='standard', n_bootstraps=1000, random_seed=None): """Bootstrap estimate of the false alarm probability""" pmax = np.fromiter(_bootstrap_max(t, y, dy, fmax, normalization, random_seed), float, n_bootstraps) pmax.sort() return 1 - np.searchsorted(pmax, Z) / len(pmax) def inv_fap_bootstrap(fap, fmax, t, y, dy, normalization='standard', n_bootstraps=1000, random_seed=None): """Bootstrap estimate of the inverse false alarm probability""" fap = np.asarray(fap) pmax = np.fromiter(_bootstrap_max(t, y, dy, fmax, normalization, random_seed), float, n_bootstraps) pmax.sort() return pmax[np.clip(np.floor((1 - fap) * len(pmax)).astype(int), 0, len(pmax) - 1)] METHODS = {'single': fap_single, 'naive': fap_naive, 'davies': fap_davies, 'baluev': fap_baluev, 'bootstrap': fap_bootstrap} def false_alarm_probability(Z, fmax, t, y, dy, normalization='standard', method='baluev', method_kwds=None): """Compute the approximate false alarm probability for periodogram peaks Z This gives an estimate of the false alarm probability for the largest value in a periodogram, based on the null hypothesis of non-varying data with Gaussian noise. The true probability cannot be computed analytically, so each method available here is an approximation to the true value. Parameters ---------- Z : array-like The periodogram value. fmax : float The maximum frequency of the periodogram. t, y, dy : array-like The data times, values, and errors. normalization : {'standard', 'model', 'log', 'psd'}, optional The periodogram normalization. method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional The approximation method to use. method_kwds : dict, optional Additional method-specific keywords. Returns ------- false_alarm_probability : np.ndarray The false alarm probability. Notes ----- For normalization='psd', the distribution can only be computed for periodograms constructed with errors specified. See Also -------- false_alarm_level : compute the periodogram level for a particular fap References ---------- .. [1] Baluev, R.V. MNRAS 385, 1279 (2008) """ if method == 'single': return fap_single(Z, len(t), normalization) elif method not in METHODS: raise ValueError("Unrecognized method: {0}".format(method)) method = METHODS[method] method_kwds = method_kwds or {} return method(Z, fmax, t, y, dy, normalization, **method_kwds) INV_METHODS = {'single': inv_fap_single, 'naive': inv_fap_naive, 'davies': inv_fap_davies, 'baluev': inv_fap_baluev, 'bootstrap': inv_fap_bootstrap} def false_alarm_level(p, fmax, t, y, dy, normalization, method='baluev', method_kwds=None): """Compute the approximate periodogram level given a false alarm probability This gives an estimate of the periodogram level corresponding to a specified false alarm probability for the largest peak, assuming a null hypothesis of non-varying data with Gaussian noise. The true level cannot be computed analytically, so each method available here is an approximation to the true value. Parameters ---------- p : array-like The false alarm probability (0 < p < 1). fmax : float The maximum frequency of the periodogram. t, y, dy : arrays The data times, values, and errors. normalization : {'standard', 'model', 'log', 'psd'}, optional The periodogram normalization. 
method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional The approximation method to use. method_kwds : dict, optional Additional method-specific keywords. Returns ------- z : np.ndarray The periodogram level. Notes ----- For normalization='psd', the distribution can only be computed for periodograms constructed with errors specified. See Also -------- false_alarm_probability : compute the fap for a given periodogram level References ---------- .. [1] Baluev, R.V. MNRAS 385, 1279 (2008) """ if method == 'single': return inv_fap_single(p, len(t), normalization) elif method not in INV_METHODS: raise ValueError("Unrecognized method: {0}".format(method)) method = INV_METHODS[method] method_kwds = method_kwds or {} return method(p, fmax, t, y, dy, normalization, **method_kwds)
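
def _false_alarm_sketch():
    # Illustrative sketch, not part of the original module: per the module
    # docstring above, the intended entry point for this functionality is
    # the LombScargle API (the public ``astropy.stats`` import path is
    # assumed here).
    import numpy as np
    from astropy.stats import LombScargle

    rand = np.random.RandomState(42)
    t = 100 * rand.rand(100)
    y = np.sin(2 * np.pi * t) + rand.randn(100)

    ls = LombScargle(t, y)
    frequency, power = ls.autopower()
    # false alarm probability of the highest peak, Baluev approximation
    return ls.false_alarm_probability(power.max(), method='baluev')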
c2128112166d9b4bb915ed8e28833e0221a024cf29d6f53907ed94153356b0d7
"""Main Lomb-Scargle Implementation""" import numpy as np from .implementations import lombscargle, available_methods from .implementations.mle import periodic_fit from . import _statistics from ... import units def has_units(obj): return hasattr(obj, 'unit') def get_unit(obj): return getattr(obj, 'unit', 1) def strip_units(*arrs): strip = lambda a: None if a is None else np.asarray(a) if len(arrs) == 1: return strip(arrs[0]) else: return map(strip, arrs) class LombScargle: """Compute the Lomb-Scargle Periodogram. This implementations here are based on code presented in [1]_ and [2]_; if you use this functionality in an academic application, citation of those works would be appreciated. Parameters ---------- t : array_like or Quantity sequence of observation times y : array_like or Quantity sequence of observations associated with times t dy : float, array_like or Quantity (optional) error or sequence of observational errors associated with times t fit_mean : bool (optional, default=True) if True, include a constant offset as part of the model at each frequency. This can lead to more accurate results, especially in the case of incomplete phase coverage. center_data : bool (optional, default=True) if True, pre-center the data by subtracting the weighted mean of the input data. This is especially important if fit_mean = False nterms : int (optional, default=1) number of terms to use in the Fourier fit normalization : {'standard', 'model', 'log', 'psd'}, optional Normalization to use for the periodogram. Examples -------- Generate noisy periodic data: >>> rand = np.random.RandomState(42) >>> t = 100 * rand.rand(100) >>> y = np.sin(2 * np.pi * t) + rand.randn(100) Compute the Lomb-Scargle periodogram on an automatically-determined frequency grid & find the frequency of max power: >>> frequency, power = LombScargle(t, y).autopower() >>> frequency[np.argmax(power)] # doctest: +FLOAT_CMP 1.0016662310392956 Compute the Lomb-Scargle periodogram at a user-specified frequency grid: >>> freq = np.arange(0.8, 1.3, 0.1) >>> LombScargle(t, y).power(freq) # doctest: +FLOAT_CMP array([0.0204304 , 0.01393845, 0.35552682, 0.01358029, 0.03083737]) If the inputs are astropy Quantities with units, the units will be validated and the outputs will also be Quantities with appropriate units: >>> from astropy import units as u >>> t = t * u.s >>> y = y * u.mag >>> frequency, power = LombScargle(t, y).autopower() >>> frequency.unit Unit("1 / s") >>> power.unit Unit(dimensionless) Note here that the Lomb-Scargle power is always a unitless quantity, because it is related to the :math:`\\chi^2` of the best-fit periodic model at each frequency. References ---------- .. [1] Vanderplas, J., Connolly, A. Ivezic, Z. & Gray, A. *Introduction to astroML: Machine learning for astrophysics*. Proceedings of the Conference on Intelligent Data Understanding (2012) .. [2] VanderPlas, J. & Ivezic, Z. *Periodograms for Multiband Astronomical Time Series*. 
        ApJ 812.1:18 (2015)
    """
    available_methods = available_methods()

    def __init__(self, t, y, dy=None, fit_mean=True, center_data=True,
                 nterms=1, normalization='standard'):
        self.t, self.y, self.dy = self._validate_inputs(t, y, dy)
        self.fit_mean = fit_mean
        self.center_data = center_data
        self.nterms = nterms
        self.normalization = normalization

    def _validate_inputs(self, t, y, dy):
        # Validate shapes of inputs
        if dy is None:
            t, y = np.broadcast_arrays(t, y, subok=True)
        else:
            t, y, dy = np.broadcast_arrays(t, y, dy, subok=True)
        if t.ndim != 1:
            raise ValueError("Inputs (t, y, dy) must be 1-dimensional")

        # validate units of inputs if any is a Quantity
        if any(has_units(arr) for arr in (t, y, dy)):
            t, y = map(units.Quantity, (t, y))
            if dy is not None:
                dy = units.Quantity(dy)
                try:
                    dy = units.Quantity(dy, unit=y.unit)
                except units.UnitConversionError:
                    raise ValueError("Units of dy not equivalent "
                                     "to units of y")
        return t, y, dy

    def _validate_frequency(self, frequency):
        frequency = np.asanyarray(frequency)

        if has_units(self.t):
            frequency = units.Quantity(frequency)
            try:
                frequency = units.Quantity(frequency, unit=1./self.t.unit)
            except units.UnitConversionError:
                raise ValueError("Units of frequency not equivalent to "
                                 "units of 1/t")
        else:
            if has_units(frequency):
                raise ValueError("frequency has units while 1/t doesn't.")
        return frequency

    def _validate_t(self, t):
        t = np.asanyarray(t)

        if has_units(self.t):
            t = units.Quantity(t)
            try:
                t = units.Quantity(t, unit=self.t.unit)
            except units.UnitConversionError:
                raise ValueError("Units of t not equivalent to "
                                 "units of input self.t")
        return t

    def _power_unit(self, norm):
        if has_units(self.y):
            if self.dy is None and norm == 'psd':
                return self.y.unit ** 2
            else:
                return units.dimensionless_unscaled
        else:
            return 1

    def autofrequency(self, samples_per_peak=5, nyquist_factor=5,
                      minimum_frequency=None, maximum_frequency=None,
                      return_freq_limits=False):
        """Determine a suitable frequency grid for data.

        Note that this assumes the peak width is driven by the observational
        baseline, which is generally a good assumption when the baseline is
        much larger than the oscillation period. If you are searching for
        periods longer than the baseline of your observations, this may not
        perform well.

        Even with a large baseline, be aware that the maximum frequency
        returned is based on the concept of "average Nyquist frequency",
        which may not be useful for irregularly-sampled data. The maximum
        frequency can be adjusted via the nyquist_factor argument, or through
        the maximum_frequency argument.

        Parameters
        ----------
        samples_per_peak : float (optional, default=5)
            The approximate number of desired samples across the typical peak
        nyquist_factor : float (optional, default=5)
            The multiple of the average nyquist frequency used to choose the
            maximum frequency if maximum_frequency is not provided.
        minimum_frequency : float (optional)
            If specified, then use this minimum frequency rather than one
            chosen based on the size of the baseline.
        maximum_frequency : float (optional)
            If specified, then use this maximum frequency rather than one
            chosen based on the average nyquist frequency.
        return_freq_limits : bool (optional)
            if True, return only the frequency limits rather than the full
            frequency grid.
Returns ------- frequency : ndarray or Quantity The heuristically-determined optimal frequency bin """ baseline = self.t.max() - self.t.min() n_samples = self.t.size df = 1.0 / baseline / samples_per_peak if minimum_frequency is None: minimum_frequency = 0.5 * df if maximum_frequency is None: avg_nyquist = 0.5 * n_samples / baseline maximum_frequency = nyquist_factor * avg_nyquist Nf = 1 + int(np.round((maximum_frequency - minimum_frequency) / df)) if return_freq_limits: return minimum_frequency, minimum_frequency + df * (Nf - 1) else: return minimum_frequency + df * np.arange(Nf) def autopower(self, method='auto', method_kwds=None, normalization=None, samples_per_peak=5, nyquist_factor=5, minimum_frequency=None, maximum_frequency=None): """Compute Lomb-Scargle power at automatically-determined frequencies. Parameters ---------- method : string (optional) specify the lomb scargle implementation to use. Options are: - 'auto': choose the best method based on the input - 'fast': use the O[N log N] fast method. Note that this requires evenly-spaced frequencies: by default this will be checked unless ``assume_regular_frequency`` is set to True. - 'slow': use the O[N^2] pure-python implementation - 'cython': use the O[N^2] cython implementation. This is slightly faster than method='slow', but much more memory efficient. - 'chi2': use the O[N^2] chi2/linear-fitting implementation - 'fastchi2': use the O[N log N] chi2 implementation. Note that this requires evenly-spaced frequencies: by default this will be checked unless ``assume_regular_frequency`` is set to True. - 'scipy': use ``scipy.signal.lombscargle``, which is an O[N^2] implementation written in C. Note that this does not support heteroskedastic errors. method_kwds : dict (optional) additional keywords to pass to the lomb-scargle method normalization : {'standard', 'model', 'log', 'psd'}, optional If specified, override the normalization specified at instantiation. samples_per_peak : float (optional, default=5) The approximate number of desired samples across the typical peak nyquist_factor : float (optional, default=5) The multiple of the average nyquist frequency used to choose the maximum frequency if maximum_frequency is not provided. minimum_frequency : float (optional) If specified, then use this minimum frequency rather than one chosen based on the size of the baseline. maximum_frequency : float (optional) If specified, then use this maximum frequency rather than one chosen based on the average nyquist frequency. Returns ------- frequency, power : ndarrays The frequency and Lomb-Scargle power """ frequency = self.autofrequency(samples_per_peak=samples_per_peak, nyquist_factor=nyquist_factor, minimum_frequency=minimum_frequency, maximum_frequency=maximum_frequency) power = self.power(frequency, normalization=normalization, method=method, method_kwds=method_kwds, assume_regular_frequency=True) return frequency, power def power(self, frequency, normalization=None, method='auto', assume_regular_frequency=False, method_kwds=None): """Compute the Lomb-Scargle power at the given frequencies. Parameters ---------- frequency : array_like or Quantity frequencies (not angular frequencies) at which to evaluate the periodogram. Note that in order to use method='fast', frequencies must be regularly-spaced. method : string (optional) specify the lomb scargle implementation to use. Options are: - 'auto': choose the best method based on the input - 'fast': use the O[N log N] fast method. 
            Note that this requires evenly-spaced frequencies: by default
            this will be checked unless ``assume_regular_frequency`` is set
            to True.
            - 'slow': use the O[N^2] pure-python implementation
            - 'cython': use the O[N^2] cython implementation. This is
              slightly faster than method='slow', but much more memory
              efficient.
            - 'chi2': use the O[N^2] chi2/linear-fitting implementation
            - 'fastchi2': use the O[N log N] chi2 implementation. Note that
              this requires evenly-spaced frequencies: by default this will
              be checked unless ``assume_regular_frequency`` is set to True.
            - 'scipy': use ``scipy.signal.lombscargle``, which is an O[N^2]
              implementation written in C. Note that this does not support
              heteroskedastic errors.
        assume_regular_frequency : bool (optional)
            if True, assume that the input frequency is of the form
            freq = f0 + df * np.arange(N). Only referenced if method is
            'auto' or 'fast'.
        normalization : {'standard', 'model', 'log', 'psd'}, optional
            If specified, override the normalization specified at
            instantiation.
        method_kwds : dict (optional)
            additional keywords to pass to the lomb-scargle method

        Returns
        -------
        power : ndarray
            The Lomb-Scargle power at the specified frequency
        """
        if normalization is None:
            normalization = self.normalization
        frequency = self._validate_frequency(frequency)
        power = lombscargle(*strip_units(self.t, self.y, self.dy),
                            frequency=strip_units(frequency),
                            center_data=self.center_data,
                            fit_mean=self.fit_mean,
                            nterms=self.nterms,
                            normalization=normalization,
                            method=method, method_kwds=method_kwds,
                            assume_regular_frequency=assume_regular_frequency)
        return power * self._power_unit(normalization)

    def model(self, t, frequency):
        """Compute the Lomb-Scargle model at the given frequency.

        Parameters
        ----------
        t : array_like or Quantity, length n_samples
            times at which to compute the model
        frequency : float
            the frequency for the model

        Returns
        -------
        y : np.ndarray, length n_samples
            The model fit corresponding to the input times
        """
        frequency = self._validate_frequency(frequency)
        t = self._validate_t(t)
        y_fit = periodic_fit(*strip_units(self.t, self.y, self.dy),
                             frequency=strip_units(frequency),
                             t_fit=strip_units(t),
                             center_data=self.center_data,
                             fit_mean=self.fit_mean,
                             nterms=self.nterms)
        return y_fit * get_unit(self.y)

    def distribution(self, power, cumulative=False):
        """Expected periodogram distribution under the null hypothesis.

        This computes the expected probability distribution or cumulative
        probability distribution of periodogram power, under the null
        hypothesis of a non-varying signal with Gaussian noise. Note that
        this is not the same as the expected distribution of peak values;
        for that see the ``false_alarm_probability()`` method.

        Parameters
        ----------
        power : array_like
            The periodogram power at which to compute the distribution.
        cumulative : bool (optional)
            If True, then return the cumulative distribution.

        See Also
        --------
        false_alarm_probability
        false_alarm_level

        Returns
        -------
        dist : np.ndarray
            The probability density or cumulative probability associated with
            the provided powers.
""" dH = 1 if self.fit_mean or self.center_data else 0 dK = dH + 2 * self.nterms dist = _statistics.cdf_single if cumulative else _statistics.pdf_single return dist(power, len(self.t), self.normalization, dH=dH, dK=dK) def false_alarm_probability(self, power, method='baluev', samples_per_peak=5, nyquist_factor=5, minimum_frequency=None, maximum_frequency=None, method_kwds=None): """False alarm probability of periodogram maxima under the null hypothesis. This gives an estimate of the false alarm probability given the height of the largest peak in the periodogram, based on the null hypothesis of non-varying data with Gaussian noise. Parameters ---------- power : array-like The periodogram value. method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional The approximation method to use. maximum_frequency : float The maximum frequency of the periodogram. method_kwds : dict (optional) Additional method-specific keywords. Returns ------- false_alarm_probability : np.ndarray The false alarm probability Notes ----- The true probability distribution for the largest peak cannot be determined analytically, so each method here provides an approximation to the value. The available methods are: - "baluev" (default): the upper-limit to the alias-free probability, using the approach of Baluev (2008) [1]_. - "davies" : the Davies upper bound from Baluev (2008) [1]_. - "naive" : the approximate probability based on an estimated effective number of independent frequencies. - "bootstrap" : the approximate probability based on bootstrap resamplings of the input data. Note also that for normalization='psd', the distribution can only be computed for periodograms constructed with errors specified. See Also -------- distribution false_alarm_level References ---------- .. [1] Baluev, R.V. MNRAS 385, 1279 (2008) """ if self.nterms != 1: raise NotImplementedError("false alarm probability is not " "implemented for multiterm periodograms.") if not (self.fit_mean or self.center_data): raise NotImplementedError("false alarm probability is implemented " "only for periodograms of centered data.") fmin, fmax = self.autofrequency(samples_per_peak=samples_per_peak, nyquist_factor=nyquist_factor, minimum_frequency=minimum_frequency, maximum_frequency=maximum_frequency, return_freq_limits=True) return _statistics.false_alarm_probability(power, fmax=fmax, t=self.t, y=self.y, dy=self.dy, normalization=self.normalization, method=method, method_kwds=method_kwds) def false_alarm_level(self, false_alarm_probability, method='baluev', samples_per_peak=5, nyquist_factor=5, minimum_frequency=None, maximum_frequency=None, method_kwds=None): """Level of maximum at a given false alarm probability. This gives an estimate of the periodogram level corresponding to a specified false alarm probability for the largest peak, assuming a null hypothesis of non-varying data with Gaussian noise. Parameters ---------- false_alarm_probability : array-like The false alarm probability (0 < fap < 1). maximum_frequency : float The maximum frequency of the periodogram. method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional The approximation method to use; default='baluev'. method_kwds : dict, optional Additional method-specific keywords. Returns ------- power : np.ndarray The periodogram peak height corresponding to the specified false alarm probability. Notes ----- The true probability distribution for the largest peak cannot be determined analytically, so each method here provides an approximation to the value. 
The available methods are: - "baluev" (default): the upper-limit to the alias-free probability, using the approach of Baluev (2008) [1]_. - "davies" : the Davies upper bound from Baluev (2008) [1]_. - "naive" : the approximate probability based on an estimated effective number of independent frequencies. - "bootstrap" : the approximate probability based on bootstrap resamplings of the input data. Note also that for normalization='psd', the distribution can only be computed for periodograms constructed with errors specified. See Also -------- distribution false_alarm_probability References ---------- .. [1] Baluev, R.V. MNRAS 385, 1279 (2008) """ if self.nterms != 1: raise NotImplementedError("false alarm probability is not " "implemented for multiterm periodograms.") if not (self.fit_mean or self.center_data): raise NotImplementedError("false alarm probability is implemented " "only for periodograms of centered data.") fmin, fmax = self.autofrequency(samples_per_peak=samples_per_peak, nyquist_factor=nyquist_factor, minimum_frequency=minimum_frequency, maximum_frequency=maximum_frequency, return_freq_limits=True) return _statistics.false_alarm_level(false_alarm_probability, fmax=fmax, t=self.t, y=self.y, dy=self.dy, normalization=self.normalization, method=method, method_kwds=method_kwds)
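# An end-to-end usage sketch for the LombScargle class defined above (in
# released astropy it is exposed as ``astropy.stats.LombScargle``).  The data
# below are illustrative, and the false-alarm step needs scipy for the
# default 'baluev' method.
import numpy as np
from astropy.stats import LombScargle

rng = np.random.RandomState(42)
t = 10 * rng.rand(100)
y = np.sin(2 * np.pi * t) + 0.1 * rng.randn(100)

ls = LombScargle(t, y, 0.1 * np.ones_like(y))
frequency, power = ls.autopower(maximum_frequency=5)
best = frequency[np.argmax(power)]

t_fit = np.linspace(0, 10, 500)
y_fit = ls.model(t_fit, best)                    # best-fit sinusoid
fap = ls.false_alarm_probability(power.max(),    # significance of the peak
                                 maximum_frequency=5)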
import numpy as np


NORMALIZATIONS = ['standard', 'psd', 'model', 'log']


def compute_chi2_ref(y, dy=None, center_data=True, fit_mean=True):
    """Compute the reference chi-square for a particular dataset.

    Note: this is not valid for center_data=False and fit_mean=False.

    Parameters
    ----------
    y : array_like
        data values
    dy : float, array, or None (optional)
        data uncertainties
    center_data : boolean
        specify whether data should be pre-centered
    fit_mean : boolean
        specify whether model should fit the mean of the data

    Returns
    -------
    chi2_ref : float
        The reference chi-square for the periodogram of this data
    """
    if dy is None:
        dy = 1
    y, dy = np.broadcast_arrays(y, dy)
    w = dy ** -2.0
    if center_data or fit_mean:
        mu = np.dot(w, y) / w.sum()
    else:
        mu = 0
    yw = (y - mu) / dy
    return np.dot(yw, yw)


def convert_normalization(Z, N, from_normalization, to_normalization,
                          chi2_ref=None):
    """Convert power from one normalization to another.

    This currently only works for standard & floating-mean models.

    Parameters
    ----------
    Z : array_like
        the periodogram output
    N : integer
        the number of data points
    from_normalization, to_normalization : strings
        the normalization to convert from and to. Options are
        ['standard', 'model', 'log', 'psd']
    chi2_ref : float
        The reference chi-square, required for converting to or from the
        psd normalization.

    Returns
    -------
    Z_out : ndarray
        The periodogram in the new normalization
    """
    Z = np.asarray(Z)
    from_to = (from_normalization, to_normalization)

    for norm in from_to:
        if norm not in NORMALIZATIONS:
            raise ValueError("{0} is not a valid normalization"
                             "".format(norm))

    if from_normalization == to_normalization:
        return Z

    if "psd" in from_to and chi2_ref is None:
        raise ValueError("must supply reference chi^2 when converting "
                         "to or from psd normalization")

    if from_to == ('log', 'standard'):
        return 1 - np.exp(-Z)
    elif from_to == ('standard', 'log'):
        return -np.log(1 - Z)
    elif from_to == ('log', 'model'):
        return np.exp(Z) - 1
    elif from_to == ('model', 'log'):
        return np.log(Z + 1)
    elif from_to == ('model', 'standard'):
        return Z / (1 + Z)
    elif from_to == ('standard', 'model'):
        return Z / (1 - Z)
    elif from_normalization == "psd":
        return convert_normalization(2 / chi2_ref * Z, N,
                                     from_normalization='standard',
                                     to_normalization=to_normalization)
    elif to_normalization == "psd":
        Z_standard = convert_normalization(Z, N,
                                           from_normalization=from_normalization,
                                           to_normalization='standard')
        return 0.5 * chi2_ref * Z_standard
    else:
        raise NotImplementedError("conversion from '{0}' to '{1}'"
                                  "".format(from_normalization,
                                            to_normalization))
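# A small self-check sketch for ``convert_normalization``: converting a set
# of powers to another normalization and back should recover the input
# exactly.  The values of Z, N, and chi2_ref below are purely illustrative;
# the import path is assumed from this package layout.
import numpy as np
from astropy.stats.lombscargle.utils import convert_normalization

Z = np.linspace(0.01, 0.8, 50)   # powers in the 'standard' normalization
chi2_ref = 123.4                 # illustrative reference chi-square

Z_psd = convert_normalization(Z, N=100, from_normalization='standard',
                              to_normalization='psd', chi2_ref=chi2_ref)
Z_back = convert_normalization(Z_psd, N=100, from_normalization='psd',
                               to_normalization='standard', chi2_ref=chi2_ref)
np.testing.assert_allclose(Z, Z_back)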
import numpy as np from .mle import design_matrix def lombscargle_chi2(t, y, dy, frequency, normalization='standard', fit_mean=True, center_data=True, nterms=1): """Lomb-Scargle Periodogram This implements a chi-squared-based periodogram, which is relatively slow but useful for validating the faster algorithms in the package. Parameters ---------- t, y, dy : array_like (NOT astropy.Quantities) times, values, and errors of the data points. These should be broadcastable to the same shape. frequency : array_like frequencies (not angular frequencies) at which to calculate periodogram normalization : string (optional, default='standard') Normalization to use for the periodogram. Options are 'standard', 'model', 'log', or 'psd'. fit_mean : bool (optional, default=True) if True, include a constant offset as part of the model at each frequency. This can lead to more accurate results, especially in the case of incomplete phase coverage. center_data : bool (optional, default=True) if True, pre-center the data by subtracting the weighted mean of the input data. This is especially important if ``fit_mean = False`` nterms : int (optional, default=1) Number of Fourier terms in the fit Returns ------- power : array_like Lomb-Scargle power associated with each frequency. Units of the result depend on the normalization. References ---------- .. [1] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009) .. [2] W. Press et al, Numerical Recipes in C (2002) .. [3] Scargle, J.D. 1982, ApJ 263:835-853 """ if dy is None: dy = 1 t, y, dy = np.broadcast_arrays(t, y, dy) frequency = np.asarray(frequency) if t.ndim != 1: raise ValueError("t, y, dy should be one dimensional") if frequency.ndim != 1: raise ValueError("frequency should be one-dimensional") w = dy ** -2.0 w /= w.sum() # if fit_mean is true, centering the data now simplifies the math below. if center_data or fit_mean: yw = (y - np.dot(w, y)) / dy else: yw = y / dy chi2_ref = np.dot(yw, yw) # compute the unnormalized model chi2 at each frequency def compute_power(f): X = design_matrix(t, f, dy=dy, bias=fit_mean, nterms=nterms) XTX = np.dot(X.T, X) XTy = np.dot(X.T, yw) return np.dot(XTy.T, np.linalg.solve(XTX, XTy)) p = np.array([compute_power(f) for f in frequency]) if normalization == 'psd': p *= 0.5 elif normalization == 'model': p /= (chi2_ref - p) elif normalization == 'log': p = -np.log(1 - p / chi2_ref) elif normalization == 'standard': p /= chi2_ref else: raise ValueError("normalization='{0}' " "not recognized".format(normalization)) return p
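# A direct-call sketch for ``lombscargle_chi2`` above.  Because it solves the
# full weighted least-squares problem at each frequency, it makes a handy
# reference implementation when validating the faster methods.  The import
# path is assumed from this package layout; the data are illustrative.
import numpy as np
from astropy.stats.lombscargle.implementations.chi2_impl import lombscargle_chi2

rng = np.random.RandomState(0)
t = 10 * rng.rand(60)
y = np.sin(2 * np.pi * t) + 0.2 * rng.randn(60)

freq = np.linspace(0.5, 1.5, 200)
power = lombscargle_chi2(t, y, dy=0.2, frequency=freq, nterms=2)
assert abs(freq[np.argmax(power)] - 1.0) < 0.05   # peak near the true frequency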
""" Main Lomb-Scargle Implementation The ``lombscargle`` function here is essentially a sophisticated switch statement for the various implementations available in this submodule """ __all__ = ['lombscargle', 'available_methods'] import warnings import numpy as np from .slow_impl import lombscargle_slow from .fast_impl import lombscargle_fast from .scipy_impl import lombscargle_scipy from .chi2_impl import lombscargle_chi2 from .fastchi2_impl import lombscargle_fastchi2 from .cython_impl import lombscargle_cython METHODS = {'slow': lombscargle_slow, 'fast': lombscargle_fast, 'chi2': lombscargle_chi2, 'scipy': lombscargle_scipy, 'fastchi2': lombscargle_fastchi2, 'cython': lombscargle_cython} def available_methods(): methods = ['auto', 'slow', 'chi2', 'cython', 'fast', 'fastchi2'] # Scipy required for scipy algorithm (obviously) try: import scipy except ImportError: pass else: methods.append('scipy') return methods def _is_regular(frequency): frequency = np.asarray(frequency) if frequency.ndim != 1: return False elif len(frequency) == 1: return True else: diff = np.diff(frequency) return np.allclose(diff[0], diff) def _get_frequency_grid(frequency, assume_regular_frequency=False): """Utility to get grid parameters from a frequency array Parameters ---------- frequency : array_like or Quantity input frequency grid assume_regular_frequency : bool (default = False) if True, then do not check whether frequency is a regular grid Returns ------- f0, df, N : scalars Parameters such that all(frequency == f0 + df * np.arange(N)) """ frequency = np.asarray(frequency) if frequency.ndim != 1: raise ValueError("frequency grid must be 1 dimensional") elif len(frequency) == 1: return frequency[0], frequency[0], 1 elif not (assume_regular_frequency or _is_regular(frequency)): raise ValueError("frequency must be a regular grid") return frequency[0], frequency[1] - frequency[0], len(frequency) def validate_method(method, dy, fit_mean, nterms, frequency, assume_regular_frequency): """ Validate the method argument, and if method='auto' choose the appropriate method """ methods = available_methods() prefer_fast = (len(frequency) > 200 and (assume_regular_frequency or _is_regular(frequency))) prefer_scipy = 'scipy' in methods and dy is None and not fit_mean # automatically choose the appropriate method if method == 'auto': if nterms != 1: if prefer_fast: method = 'fastchi2' else: method = 'chi2' elif prefer_fast: method = 'fast' elif prefer_scipy: method = 'scipy' else: method = 'cython' if method not in METHODS: raise ValueError("invalid method: {0}".format(method)) return method def lombscargle(t, y, dy=None, frequency=None, method='auto', assume_regular_frequency=False, normalization='standard', fit_mean=True, center_data=True, method_kwds=None, nterms=1): """ Compute the Lomb-scargle Periodogram with a given method. Parameters ---------- t : array_like sequence of observation times y : array_like sequence of observations associated with times t dy : float or array_like (optional) error or sequence of observational errors associated with times t frequency : array_like frequencies (not angular frequencies) at which to evaluate the periodogram. If not specified, optimal frequencies will be chosen using a heuristic which will attempt to provide sufficient frequency range and sampling so that peaks will not be missed. Note that in order to use method='fast', frequencies must be regularly spaced. method : string (optional) specify the lomb scargle implementation to use. 
        Options are:

        - 'auto': choose the best method based on the input
        - 'fast': use the O[N log N] fast method. Note that this requires
          evenly-spaced frequencies: by default this will be checked unless
          ``assume_regular_frequency`` is set to True.
        - 'slow': use the O[N^2] pure-python implementation
        - 'chi2': use the O[N^2] chi2/linear-fitting implementation
        - 'fastchi2': use the O[N log N] chi2 implementation. Note that this
          requires evenly-spaced frequencies: by default this will be checked
          unless ``assume_regular_frequency`` is set to True.
        - 'scipy': use ``scipy.signal.lombscargle``, which is an O[N^2]
          implementation written in C. Note that this does not support
          heteroskedastic errors.
    assume_regular_frequency : bool (optional)
        if True, assume that the input frequency is of the form
        freq = f0 + df * np.arange(N). Only referenced if method is 'auto'
        or 'fast'.
    normalization : string (optional, default='standard')
        Normalization to use for the periodogram.
        Options are 'standard', 'model', 'log', or 'psd'.
    fit_mean : bool (optional, default=True)
        if True, include a constant offset as part of the model at each
        frequency. This can lead to more accurate results, especially in
        the case of incomplete phase coverage.
    center_data : bool (optional, default=True)
        if True, pre-center the data by subtracting the weighted mean of
        the input data. This is especially important if ``fit_mean = False``
    method_kwds : dict (optional)
        additional keywords to pass to the lomb-scargle method
    nterms : int (default=1)
        number of Fourier terms to use in the periodogram.
        Not supported with every method.

    Returns
    -------
    PLS : array_like
        Lomb-Scargle power associated with each frequency
    """
    # frequencies should be one-dimensional arrays
    output_shape = frequency.shape
    frequency = frequency.ravel()

    # we'll need to adjust args and kwds for each method
    args = (t, y, dy)
    kwds = dict(frequency=frequency,
                center_data=center_data,
                fit_mean=fit_mean,
                normalization=normalization,
                nterms=nterms,
                **(method_kwds or {}))

    method = validate_method(method, dy=dy, fit_mean=fit_mean, nterms=nterms,
                             frequency=frequency,
                             assume_regular_frequency=assume_regular_frequency)

    # scipy doesn't support dy or fit_mean=True
    if method == 'scipy':
        if kwds.pop('fit_mean'):
            raise ValueError("scipy method does not support fit_mean=True")
        if dy is not None:
            dy = np.ravel(np.asarray(dy))
            if not np.allclose(dy[0], dy):
                raise ValueError("scipy method only supports "
                                 "uniform uncertainties dy")
        args = (t, y)

    # fast methods require frequency expressed as a grid
    if method.startswith('fast'):
        f0, df, Nf = _get_frequency_grid(kwds.pop('frequency'),
                                         assume_regular_frequency)
        kwds.update(f0=f0, df=df, Nf=Nf)

    # only chi2 methods support nterms
    if not method.endswith('chi2'):
        if kwds.pop('nterms') != 1:
            raise ValueError("nterms != 1 only supported with 'chi2' "
                             "or 'fastchi2' methods")

    PLS = METHODS[method](*args, **kwds)
    return PLS.reshape(output_shape)
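# A dispatch sketch for the ``lombscargle`` switch above: the same inputs
# routed through two different implementations should agree to numerical
# precision (the 'fast' method additionally needs a regular frequency grid).
# Import path assumed from this package layout; data are illustrative.
import numpy as np
from astropy.stats.lombscargle.implementations import lombscargle

rng = np.random.RandomState(0)
t = 20 * rng.rand(100)
y = np.sin(2 * np.pi * t) + rng.randn(100)
freq = 0.1 + 0.01 * np.arange(300)   # regular grid, as 'fast' requires

p_slow = lombscargle(t, y, frequency=freq, method='slow')
p_fast = lombscargle(t, y, frequency=freq, method='fast',
                     method_kwds=dict(use_fft=False))
np.testing.assert_allclose(p_slow, p_fast)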
import numpy as np from .utils import trig_sum def lombscargle_fast(t, y, dy, f0, df, Nf, center_data=True, fit_mean=True, normalization='standard', use_fft=True, trig_sum_kwds=None): """Fast Lomb-Scargle Periodogram This implements the Press & Rybicki method [1]_ for fast O[N log(N)] Lomb-Scargle periodograms. Parameters ---------- t, y, dy : array_like (NOT astropy.Quantities) times, values, and errors of the data points. These should be broadcastable to the same shape. f0, df, Nf : (float, float, int) parameters describing the frequency grid, f = f0 + df * arange(Nf). center_data : bool (default=True) Specify whether to subtract the mean of the data before the fit fit_mean : bool (default=True) If True, then compute the floating-mean periodogram; i.e. let the mean vary with the fit. normalization : string (optional, default='standard') Normalization to use for the periodogram. Options are 'standard', 'model', 'log', or 'psd'. use_fft : bool (default=True) If True, then use the Press & Rybicki O[NlogN] algorithm to compute the result. Otherwise, use a slower O[N^2] algorithm trig_sum_kwds : dict or None (optional) extra keyword arguments to pass to the ``trig_sum`` utility. Options are ``oversampling`` and ``Mfft``. See documentation of ``trig_sum`` for details. Returns ------- power : ndarray Lomb-Scargle power associated with each frequency. Units of the result depend on the normalization. Notes ----- Note that the ``use_fft=True`` algorithm is an approximation to the true Lomb-Scargle periodogram, and as the number of points grows this approximation improves. On the other hand, for very small datasets (<~50 points or so) this approximation may not be useful. References ---------- .. [1] Press W.H. and Rybicki, G.B, "Fast algorithm for spectral analysis of unevenly sampled data". ApJ 1:338, p277, 1989 .. [2] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009) .. [3] W. Press et al, Numerical Recipes in C (2002) """ if dy is None: dy = 1 # Validate and setup input data t, y, dy = np.broadcast_arrays(t, y, dy) if t.ndim != 1: raise ValueError("t, y, dy should be one dimensional") # Validate and setup frequency grid if f0 < 0: raise ValueError("Frequencies must be positive") if df <= 0: raise ValueError("Frequency steps must be positive") if Nf <= 0: raise ValueError("Number of frequencies must be positive") w = dy ** -2.0 w /= w.sum() # Center the data. Even if we're fitting the offset, # this step makes the expressions below more succinct if center_data or fit_mean: y = y - np.dot(w, y) # set up arguments to trig_sum kwargs = dict.copy(trig_sum_kwds or {}) kwargs.update(f0=f0, df=df, use_fft=use_fft, N=Nf) # ---------------------------------------------------------------------- # 1. 
compute functions of the time-shift tau at each frequency Sh, Ch = trig_sum(t, w * y, **kwargs) S2, C2 = trig_sum(t, w, freq_factor=2, **kwargs) if fit_mean: S, C = trig_sum(t, w, **kwargs) tan_2omega_tau = (S2 - 2 * S * C) / (C2 - (C * C - S * S)) else: tan_2omega_tau = S2 / C2 # This is what we're computing below; the straightforward way is slower # and less stable, so we use trig identities instead # # omega_tau = 0.5 * np.arctan(tan_2omega_tau) # S2w, C2w = np.sin(2 * omega_tau), np.cos(2 * omega_tau) # Sw, Cw = np.sin(omega_tau), np.cos(omega_tau) S2w = tan_2omega_tau / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau) C2w = 1 / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau) Cw = np.sqrt(0.5) * np.sqrt(1 + C2w) Sw = np.sqrt(0.5) * np.sign(S2w) * np.sqrt(1 - C2w) # ---------------------------------------------------------------------- # 2. Compute the periodogram, following Zechmeister & Kurster # and using tricks from Press & Rybicki. YY = np.dot(w, y ** 2) YC = Ch * Cw + Sh * Sw YS = Sh * Cw - Ch * Sw CC = 0.5 * (1 + C2 * C2w + S2 * S2w) SS = 0.5 * (1 - C2 * C2w - S2 * S2w) if fit_mean: CC -= (C * Cw + S * Sw) ** 2 SS -= (S * Cw - C * Sw) ** 2 power = (YC * YC / CC + YS * YS / SS) if normalization == 'standard': power /= YY elif normalization == 'model': power /= YY - power elif normalization == 'log': power = -np.log(1 - power / YY) elif normalization == 'psd': power *= 0.5 * (dy ** -2.0).sum() else: raise ValueError("normalization='{0}' " "not recognized".format(normalization)) return power
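# A sketch of the trade-off controlled by ``use_fft`` in ``lombscargle_fast``:
# the extirpolation-based FFT path approximates the exact O[N^2] sums, and
# the two should agree within a small absolute tolerance at the default
# oversampling settings.  Import path assumed; data illustrative.
import numpy as np
from astropy.stats.lombscargle.implementations.fast_impl import lombscargle_fast

rng = np.random.RandomState(0)
t = 50 * rng.rand(200)
y = np.sin(2 * np.pi * t) + 0.5 * rng.randn(200)

f0, df, Nf = 0.02, 0.01, 500
p_exact = lombscargle_fast(t, y, dy=1, f0=f0, df=df, Nf=Nf, use_fft=False)
p_approx = lombscargle_fast(t, y, dy=1, f0=f0, df=df, Nf=Nf, use_fft=True)
np.testing.assert_allclose(p_exact, p_approx, atol=0.01)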
import numpy as np


def lombscargle_scipy(t, y, frequency, normalization='standard',
                      center_data=True):
    """Lomb-Scargle Periodogram

    This is a wrapper of ``scipy.signal.lombscargle`` for computation of the
    Lomb-Scargle periodogram. This is a relatively fast version of the naive
    O[N^2] algorithm, but cannot handle heteroskedastic errors.

    Parameters
    ----------
    t, y : array_like (NOT astropy.Quantities)
        times and values of the data points. These should be
        broadcastable to the same shape.
    frequency : array_like
        frequencies (not angular frequencies) at which to calculate
        periodogram
    normalization : string (optional, default='standard')
        Normalization to use for the periodogram.
        Options are 'standard', 'model', 'log', or 'psd'.
    center_data : bool (optional, default=True)
        if True, pre-center the data by subtracting the mean
        of the input data.

    Returns
    -------
    power : array_like
        Lomb-Scargle power associated with each frequency.
        Units of the result depend on the normalization.

    References
    ----------
    .. [1] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
    .. [2] W. Press et al, Numerical Recipes in C (2002)
    .. [3] Scargle, J.D. 1982, ApJ 263:835-853
    """
    try:
        from scipy import signal
    except ImportError:
        raise ImportError("scipy must be installed to use lombscargle_scipy")

    t, y = np.broadcast_arrays(t, y)

    # Scipy requires floating-point input
    t = np.asarray(t, dtype=float)
    y = np.asarray(y, dtype=float)
    frequency = np.asarray(frequency, dtype=float)

    if t.ndim != 1:
        raise ValueError("t and y should be one dimensional")
    if frequency.ndim != 1:
        raise ValueError("frequency should be one-dimensional")

    if center_data:
        y = y - y.mean()

    # Note: scipy input accepts angular frequencies
    p = signal.lombscargle(t, y, 2 * np.pi * frequency)

    if normalization == 'psd':
        pass
    elif normalization == 'standard':
        p *= 2 / (t.size * np.mean(y ** 2))
    elif normalization == 'log':
        p = -np.log(1 - 2 * p / (t.size * np.mean(y ** 2)))
    elif normalization == 'model':
        p /= 0.5 * t.size * np.mean(y ** 2) - p
    else:
        raise ValueError("normalization='{0}' "
                         "not recognized".format(normalization))
    return p
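# A sketch of the constraints on the scipy wrapper above: it supports neither
# heteroskedastic errors nor a floating mean, so with centered data it should
# match the pure-python implementation run with ``fit_mean=False``.  Requires
# scipy; import paths assumed from this package layout.
import numpy as np
from astropy.stats import LombScargle
from astropy.stats.lombscargle.implementations.scipy_impl import lombscargle_scipy

rng = np.random.RandomState(0)
t = 10 * rng.rand(80)
y = np.sin(2 * np.pi * t) + rng.randn(80)
freq = np.linspace(0.5, 1.5, 100)

p_scipy = lombscargle_scipy(t, y, freq)
p_ref = LombScargle(t, y, fit_mean=False).power(freq, method='slow')
np.testing.assert_allclose(p_scipy, p_ref)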
import warnings from math import factorial import numpy as np def bitceil(N): """ Find the bit (i.e. power of 2) immediately greater than or equal to N Note: this works for numbers up to 2 ** 64. Roughly equivalent to int(2 ** np.ceil(np.log2(N))) """ return 1 << int(N - 1).bit_length() def extirpolate(x, y, N=None, M=4): """ Extirpolate the values (x, y) onto an integer grid range(N), using lagrange polynomial weights on the M nearest points. Parameters ---------- x : array_like array of abscissas y : array_like array of ordinates N : int number of integer bins to use. For best performance, N should be larger than the maximum of x M : int number of adjoining points on which to extirpolate. Returns ------- yN : ndarray N extirpolated values associated with range(N) Example ------- >>> rng = np.random.RandomState(0) >>> x = 100 * rng.rand(20) >>> y = np.sin(x) >>> y_hat = extirpolate(x, y) >>> x_hat = np.arange(len(y_hat)) >>> f = lambda x: np.sin(x / 10) >>> np.allclose(np.sum(y * f(x)), np.sum(y_hat * f(x_hat))) True Notes ----- This code is based on the C implementation of spread() presented in Numerical Recipes in C, Second Edition (Press et al. 1989; p.583). """ x, y = map(np.ravel, np.broadcast_arrays(x, y)) if N is None: N = int(np.max(x) + 0.5 * M + 1) # Now use legendre polynomial weights to populate the results array; # This is an efficient recursive implementation (See Press et al. 1989) result = np.zeros(N, dtype=y.dtype) # first take care of the easy cases where x is an integer integers = (x % 1 == 0) np.add.at(result, x[integers].astype(int), y[integers]) x, y = x[~integers], y[~integers] # For each remaining x, find the index describing the extirpolation range. # i.e. ilo[i] < x[i] < ilo[i] + M with x[i] in the center, # adjusted so that the limits are within the range 0...N ilo = np.clip((x - M // 2).astype(int), 0, N - M) numerator = y * np.prod(x - ilo - np.arange(M)[:, np.newaxis], 0) denominator = factorial(M - 1) for j in range(M): if j > 0: denominator *= j / (j - M) ind = ilo + (M - 1 - j) np.add.at(result, ind, numerator / (denominator * (x - ind))) return result def trig_sum(t, h, df, N, f0=0, freq_factor=1, oversampling=5, use_fft=True, Mfft=4): """Compute (approximate) trigonometric sums for a number of frequencies This routine computes weighted sine and cosine sums: S_j = sum_i { h_i * sin(2 pi * f_j * t_i) } C_j = sum_i { h_i * cos(2 pi * f_j * t_i) } Where f_j = freq_factor * (f0 + j * df) for the values j in 1 ... N. The sums can be computed either by a brute force O[N^2] method, or by an FFT-based O[Nlog(N)] method. Parameters ---------- t : array_like array of input times h : array_like array weights for the sum df : float frequency spacing N : int number of frequency bins to return f0 : float (optional, default=0) The low frequency to use freq_factor : float (optional, default=1) Factor which multiplies the frequency use_fft : bool if True, use the approximate FFT algorithm to compute the result. This uses the FFT with Press & Rybicki's Lagrangian extirpolation. oversampling : int (default = 5) oversampling freq_factor for the approximation; roughly the number of time samples across the highest-frequency sinusoid. This parameter contains the tradeoff between accuracy and speed. Not referenced if use_fft is False. Mfft : int The number of adjacent points to use in the FFT approximation. Not referenced if use_fft is False. 
    Returns
    -------
    S, C : ndarrays
        summation arrays for frequencies
        f = freq_factor * (f0 + df * np.arange(N))
    """
    df *= freq_factor
    f0 *= freq_factor

    if df <= 0:
        raise ValueError("df must be positive")
    t, h = map(np.ravel, np.broadcast_arrays(t, h))

    if use_fft:
        Mfft = int(Mfft)
        if Mfft <= 0:
            raise ValueError("Mfft must be positive")

        # required size of fft is the power of 2 above N * oversampling
        Nfft = bitceil(N * oversampling)
        t0 = t.min()

        if f0 > 0:
            h = h * np.exp(2j * np.pi * f0 * (t - t0))

        tnorm = ((t - t0) * Nfft * df) % Nfft
        grid = extirpolate(tnorm, h, Nfft, Mfft)

        fftgrid = np.fft.ifft(grid)[:N]
        if t0 != 0:
            f = f0 + df * np.arange(N)
            fftgrid *= np.exp(2j * np.pi * t0 * f)

        C = Nfft * fftgrid.real
        S = Nfft * fftgrid.imag
    else:
        f = f0 + df * np.arange(N)
        C = np.dot(h, np.cos(2 * np.pi * f * t[:, np.newaxis]))
        S = np.dot(h, np.sin(2 * np.pi * f * t[:, np.newaxis]))

    return S, C
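# A verification sketch for ``trig_sum``: the FFT-based path should reproduce
# the brute-force O[N^2] sums to a tolerance set by ``oversampling`` and
# ``Mfft``.  Import path assumed from this package layout.
import numpy as np
from astropy.stats.lombscargle.implementations.utils import trig_sum

rng = np.random.RandomState(0)
t = 10 * rng.rand(50)
h = np.sin(t)

S_fft, C_fft = trig_sum(t, h, df=0.01, N=300, f0=0.1, use_fft=True)
S_dir, C_dir = trig_sum(t, h, df=0.01, N=300, f0=0.1, use_fft=False)
np.testing.assert_allclose(S_fft, S_dir, atol=1e-2)
np.testing.assert_allclose(C_fft, C_dir, atol=1e-2)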
import numpy as np def lombscargle_slow(t, y, dy, frequency, normalization='standard', fit_mean=True, center_data=True): """Lomb-Scargle Periodogram This is a pure-python implementation of the original Lomb-Scargle formalism (e.g. [1]_, [2]_), with the addition of the floating mean (e.g. [3]_) Parameters ---------- t, y, dy : array_like (NOT astropy.Quantities) times, values, and errors of the data points. These should be broadcastable to the same shape. frequency : array_like frequencies (not angular frequencies) at which to calculate periodogram normalization : string (optional, default='standard') Normalization to use for the periodogram. Options are 'standard', 'model', 'log', or 'psd'. fit_mean : bool (optional, default=True) if True, include a constant offset as part of the model at each frequency. This can lead to more accurate results, especially in the case of incomplete phase coverage. center_data : bool (optional, default=True) if True, pre-center the data by subtracting the weighted mean of the input data. This is especially important if ``fit_mean = False`` Returns ------- power : array_like Lomb-Scargle power associated with each frequency. Units of the result depend on the normalization. References ---------- .. [1] W. Press et al, Numerical Recipes in C (2002) .. [2] Scargle, J.D. 1982, ApJ 263:835-853 .. [3] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009) """ if dy is None: dy = 1 t, y, dy = np.broadcast_arrays(t, y, dy) frequency = np.asarray(frequency) if t.ndim != 1: raise ValueError("t, y, dy should be one dimensional") if frequency.ndim != 1: raise ValueError("frequency should be one-dimensional") w = dy ** -2.0 w /= w.sum() # if fit_mean is true, centering the data now simplifies the math below. if fit_mean or center_data: y = y - np.dot(w, y) omega = 2 * np.pi * frequency omega = omega.ravel()[np.newaxis, :] # make following arrays into column vectors t, y, dy, w = map(lambda x: x[:, np.newaxis], (t, y, dy, w)) sin_omega_t = np.sin(omega * t) cos_omega_t = np.cos(omega * t) # compute time-shift tau # S2 = np.dot(w.T, np.sin(2 * omega * t) S2 = 2 * np.dot(w.T, sin_omega_t * cos_omega_t) # C2 = np.dot(w.T, np.cos(2 * omega * t) C2 = 2 * np.dot(w.T, 0.5 - sin_omega_t ** 2) if fit_mean: S = np.dot(w.T, sin_omega_t) C = np.dot(w.T, cos_omega_t) S2 -= (2 * S * C) C2 -= (C * C - S * S) # compute components needed for the fit omega_t_tau = omega * t - 0.5 * np.arctan2(S2, C2) sin_omega_t_tau = np.sin(omega_t_tau) cos_omega_t_tau = np.cos(omega_t_tau) Y = np.dot(w.T, y) wy = w * y YCtau = np.dot(wy.T, cos_omega_t_tau) YStau = np.dot(wy.T, sin_omega_t_tau) CCtau = np.dot(w.T, cos_omega_t_tau * cos_omega_t_tau) SStau = np.dot(w.T, sin_omega_t_tau * sin_omega_t_tau) if fit_mean: Ctau = np.dot(w.T, cos_omega_t_tau) Stau = np.dot(w.T, sin_omega_t_tau) YCtau -= Y * Ctau YStau -= Y * Stau CCtau -= Ctau * Ctau SStau -= Stau * Stau p = (YCtau * YCtau / CCtau + YStau * YStau / SStau) YY = np.dot(w.T, y * y) if normalization == 'standard': p /= YY elif normalization == 'model': p /= YY - p elif normalization == 'log': p = -np.log(1 - p / YY) elif normalization == 'psd': p *= 0.5 * (dy ** -2.0).sum() else: raise ValueError("normalization='{0}' " "not recognized".format(normalization)) return p.ravel()
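# A cross-check sketch: the pure-python implementation above and the direct
# least-squares (chi2) implementation compute the same quantity by different
# routes, so their outputs should agree to floating-point precision.
# Import paths assumed from this package layout; data illustrative.
import numpy as np
from astropy.stats.lombscargle.implementations.slow_impl import lombscargle_slow
from astropy.stats.lombscargle.implementations.chi2_impl import lombscargle_chi2

rng = np.random.RandomState(0)
t = 10 * rng.rand(60)
y = np.sin(2 * np.pi * t) + 0.3 * rng.randn(60)
freq = np.linspace(0.5, 1.5, 100)

p1 = lombscargle_slow(t, y, dy=0.3, frequency=freq)
p2 = lombscargle_chi2(t, y, dy=0.3, frequency=freq)
np.testing.assert_allclose(p1, p2, rtol=1e-7, atol=1e-20)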
import numpy as np from .utils import trig_sum def lombscargle_fastchi2(t, y, dy, f0, df, Nf, normalization='standard', fit_mean=True, center_data=True, nterms=1, use_fft=True, trig_sum_kwds=None): """Lomb-Scargle Periodogram This implements a fast chi-squared periodogram using the algorithm outlined in [4]_. The result is identical to the standard Lomb-Scargle periodogram. The advantage of this algorithm is the ability to compute multiterm periodograms relatively quickly. Parameters ---------- t, y, dy : array_like (NOT astropy.Quantities) times, values, and errors of the data points. These should be broadcastable to the same shape. f0, df, Nf : (float, float, int) parameters describing the frequency grid, f = f0 + df * arange(Nf). normalization : string (optional, default='standard') Normalization to use for the periodogram. Options are 'standard', 'model', 'log', or 'psd'. fit_mean : bool (optional, default=True) if True, include a constant offset as part of the model at each frequency. This can lead to more accurate results, especially in the case of incomplete phase coverage. center_data : bool (optional, default=True) if True, pre-center the data by subtracting the weighted mean of the input data. This is especially important if ``fit_mean = False`` nterms : int (optional, default=1) Number of Fourier terms in the fit Returns ------- power : array_like Lomb-Scargle power associated with each frequency. Units of the result depend on the normalization. References ---------- .. [1] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009) .. [2] W. Press et al, Numerical Recipes in C (2002) .. [3] Scargle, J.D. ApJ 263:835-853 (1982) .. [4] Palmer, J. ApJ 695:496-502 (2009) """ if nterms == 0 and not fit_mean: raise ValueError("Cannot have nterms = 0 without fitting bias") if dy is None: dy = 1 # Validate and setup input data t, y, dy = np.broadcast_arrays(t, y, dy) if t.ndim != 1: raise ValueError("t, y, dy should be one dimensional") # Validate and setup frequency grid if f0 < 0: raise ValueError("Frequencies must be positive") if df <= 0: raise ValueError("Frequency steps must be positive") if Nf <= 0: raise ValueError("Number of frequencies must be positive") w = dy ** -2.0 ws = np.sum(w) # if fit_mean is true, centering the data now simplifies the math below. if center_data or fit_mean: y = y - np.dot(w, y) / ws yw = y / dy chi2_ref = np.dot(yw, yw) kwargs = dict.copy(trig_sum_kwds or {}) kwargs.update(f0=f0, df=df, use_fft=use_fft, N=Nf) # Here we build-up the matrices XTX and XTy using pre-computed # sums. 
The relevant identities are # 2 sin(mx) sin(nx) = cos(m-n)x - cos(m+n)x # 2 cos(mx) cos(nx) = cos(m-n)x + cos(m+n)x # 2 sin(mx) cos(nx) = sin(m-n)x + sin(m+n)x yws = np.sum(y * w) SCw = [(np.zeros(Nf), ws * np.ones(Nf))] SCw.extend([trig_sum(t, w, freq_factor=i, **kwargs) for i in range(1, 2 * nterms + 1)]) Sw, Cw = zip(*SCw) SCyw = [(np.zeros(Nf), yws * np.ones(Nf))] SCyw.extend([trig_sum(t, w * y, freq_factor=i, **kwargs) for i in range(1, nterms + 1)]) Syw, Cyw = zip(*SCyw) # Now create an indexing scheme so we can quickly # build-up matrices at each frequency order = [('C', 0)] if fit_mean else [] order.extend(sum([[('S', i), ('C', i)] for i in range(1, nterms + 1)], [])) funcs = dict(S=lambda m, i: Syw[m][i], C=lambda m, i: Cyw[m][i], SS=lambda m, n, i: 0.5 * (Cw[abs(m - n)][i] - Cw[m + n][i]), CC=lambda m, n, i: 0.5 * (Cw[abs(m - n)][i] + Cw[m + n][i]), SC=lambda m, n, i: 0.5 * (np.sign(m - n) * Sw[abs(m - n)][i] + Sw[m + n][i]), CS=lambda m, n, i: 0.5 * (np.sign(n - m) * Sw[abs(n - m)][i] + Sw[n + m][i])) def compute_power(i): XTX = np.array([[funcs[A[0] + B[0]](A[1], B[1], i) for A in order] for B in order]) XTy = np.array([funcs[A[0]](A[1], i) for A in order]) return np.dot(XTy.T, np.linalg.solve(XTX, XTy)) p = np.array([compute_power(i) for i in range(Nf)]) if normalization == 'psd': p *= 0.5 elif normalization == 'standard': p /= chi2_ref elif normalization == 'log': p = -np.log(1 - p / chi2_ref) elif normalization == 'model': p /= chi2_ref - p else: raise ValueError("normalization='{0}' " "not recognized".format(normalization)) return p
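# A multiterm sketch for ``lombscargle_fastchi2``: with ``use_fft=False`` the
# pre-computed trigonometric sums are exact, so the result should match the
# direct chi2 implementation for the same number of Fourier terms.
# Import paths assumed from this package layout; data illustrative.
import numpy as np
from astropy.stats.lombscargle.implementations.fastchi2_impl import lombscargle_fastchi2
from astropy.stats.lombscargle.implementations.chi2_impl import lombscargle_chi2

rng = np.random.RandomState(0)
t = 20 * rng.rand(100)
y = (np.sin(2 * np.pi * t) + 0.5 * np.sin(4 * np.pi * t)
     + 0.3 * rng.randn(100))

f0, df, Nf = 0.8, 0.01, 40
p_fast = lombscargle_fastchi2(t, y, 0.3, f0, df, Nf,
                              nterms=2, use_fft=False)
p_direct = lombscargle_chi2(t, y, 0.3, f0 + df * np.arange(Nf), nterms=2)
np.testing.assert_allclose(p_fast, p_direct, rtol=1e-7, atol=1e-20)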
import numpy as np def design_matrix(t, frequency, dy=None, bias=True, nterms=1): """Compute the Lomb-Scargle design matrix at the given frequency This is the matrix X such that the periodic model at the given frequency can be expressed :math:`\\hat{y} = X \\theta`. Parameters ---------- t : array_like, shape=(n_times,) times at which to compute the design matrix frequency : float frequency for the design matrix dy : float or array_like (optional) data uncertainties: should be broadcastable with `t` bias : bool (default=True) If true, include a bias column in the matrix nterms : int (default=1) Number of Fourier terms to include in the model Returns ------- X : ndarray, shape=(n_times, n_parameters) The design matrix, where n_parameters = bool(bias) + 2 * nterms """ t = np.asarray(t) frequency = np.asarray(frequency) if t.ndim != 1: raise ValueError("t should be one dimensional") if frequency.ndim != 0: raise ValueError("frequency must be a scalar") if nterms == 0 and not bias: raise ValueError("cannot have nterms=0 and no bias") if bias: cols = [np.ones_like(t)] else: cols = [] for i in range(1, nterms + 1): cols.append(np.sin(2 * np.pi * i * frequency * t)) cols.append(np.cos(2 * np.pi * i * frequency * t)) XT = np.vstack(cols) if dy is not None: XT /= dy return np.transpose(XT) def periodic_fit(t, y, dy, frequency, t_fit, center_data=True, fit_mean=True, nterms=1): """Compute the Lomb-Scargle model fit at a given frequency Parameters ---------- t, y, dy : float or array_like The times, observations, and uncertainties to fit frequency : float The frequency at which to compute the model t_fit : float or array_like The times at which the fit should be computed center_data : bool (default=True) If True, center the input data before applying the fit fit_mean : bool (default=True) If True, include the bias as part of the model nterms : int (default=1) The number of Fourier terms to include in the fit Returns ------- y_fit : ndarray The model fit evaluated at each value of t_fit """ t, y, frequency = map(np.asarray, (t, y, frequency)) if dy is None: dy = np.ones_like(y) else: dy = np.asarray(dy) t_fit = np.asarray(t_fit) if t.ndim != 1: raise ValueError("t, y, dy should be one dimensional") if t_fit.ndim != 1: raise ValueError("t_fit should be one dimensional") if frequency.ndim != 0: raise ValueError("frequency should be a scalar") if center_data: w = dy ** -2.0 y_mean = np.dot(y, w) / w.sum() y = (y - y_mean) else: y_mean = 0 X = design_matrix(t, frequency, dy=dy, bias=fit_mean, nterms=nterms) theta_MLE = np.linalg.solve(np.dot(X.T, X), np.dot(X.T, y / dy)) X_fit = design_matrix(t_fit, frequency, bias=fit_mean, nterms=nterms) return y_mean + np.dot(X_fit, theta_MLE)
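# A sketch of how ``design_matrix`` and ``periodic_fit`` fit together: the
# weighted normal equations are solved for the Fourier coefficients at one
# frequency, and the resulting model is evaluated on a new time grid.  Note
# that ``periodic_fit`` expects array-valued ``dy``; import path assumed.
import numpy as np
from astropy.stats.lombscargle.implementations.mle import periodic_fit

rng = np.random.RandomState(0)
t = 10 * rng.rand(50)
y = 2 + np.sin(2 * np.pi * t) + 0.1 * rng.randn(50)
dy = 0.1 * np.ones_like(y)

t_fit = np.linspace(0, 10, 200)
y_fit = periodic_fit(t, y, dy, frequency=1.0, t_fit=t_fit,
                     center_data=True, fit_mean=True, nterms=1)
# y_fit now approximates the underlying offset sinusoid at t_fit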
import numpy as np import pytest from numpy.testing import assert_allclose try: import scipy except ImportError: HAS_SCIPY = False else: HAS_SCIPY = True from .. import LombScargle from .._statistics import (cdf_single, pdf_single, fap_single, inv_fap_single, METHODS) from ..utils import convert_normalization, compute_chi2_ref METHOD_KWDS = dict(bootstrap={'n_bootstraps': 20, 'random_seed': 42}) NORMALIZATIONS = ['standard', 'psd', 'log', 'model'] @pytest.fixture def data(N=100, period=1, theta=[10, 2, 3], dy=1, rseed=0): """Generate some data for testing""" rng = np.random.RandomState(rseed) t = 5 * period * rng.rand(N) omega = 2 * np.pi / period y = theta[0] + theta[1] * np.sin(omega * t) + theta[2] * np.cos(omega * t) dy = dy * (0.5 + rng.rand(N)) y += dy * rng.randn(N) return t, y, dy @pytest.fixture def null_data(N=1000, dy=1, rseed=0): """Generate null hypothesis data""" rng = np.random.RandomState(rseed) t = 100 * rng.rand(N) dy = 0.5 * dy * (1 + rng.rand(N)) y = dy * rng.randn(N) return t, y, dy @pytest.mark.parametrize('normalization', NORMALIZATIONS) @pytest.mark.parametrize('with_errors', [True, False]) def test_distribution(null_data, normalization, with_errors, fmax=40): t, y, dy = null_data if not with_errors: dy = None N = len(t) ls = LombScargle(t, y, dy, normalization=normalization) freq, power = ls.autopower(maximum_frequency=fmax) z = np.linspace(0, power.max(), 1000) # Test that pdf and cdf are consistent dz = z[1] - z[0] z_mid = z[:-1] + 0.5 * dz pdf = ls.distribution(z_mid) cdf = ls.distribution(z, cumulative=True) assert_allclose(pdf, np.diff(cdf) / dz, rtol=1E-5, atol=1E-8) # psd normalization without specified errors produces bad results if not (normalization == 'psd' and not with_errors): # Test that observed power is distributed according to the theoretical pdf hist, bins = np.histogram(power, 30, normed=True) midpoints = 0.5 * (bins[1:] + bins[:-1]) pdf = ls.distribution(midpoints) assert_allclose(hist, pdf, rtol=0.05, atol=0.05 * pdf[0]) @pytest.mark.parametrize('N', [10, 100, 1000]) @pytest.mark.parametrize('normalization', NORMALIZATIONS) def test_inverse_single(N, normalization): fap = np.linspace(0, 1, 100) z = inv_fap_single(fap, N, normalization) fap_out = fap_single(z, N, normalization) assert_allclose(fap, fap_out) @pytest.mark.parametrize('normalization', NORMALIZATIONS) @pytest.mark.parametrize('use_errs', [True, False]) def test_inverse_bootstrap(null_data, normalization, use_errs, fmax=5): t, y, dy = null_data if not use_errs: dy = None fap = np.linspace(0, 1, 10) method = 'bootstrap' method_kwds = METHOD_KWDS['bootstrap'] ls = LombScargle(t, y, dy, normalization=normalization) z = ls.false_alarm_level(fap, maximum_frequency=fmax, method=method, method_kwds=method_kwds) fap_out = ls.false_alarm_probability(z, maximum_frequency=fmax, method=method, method_kwds=method_kwds) # atol = 1 / n_bootstraps assert_allclose(fap, fap_out, atol=0.05) @pytest.mark.parametrize('method', sorted(set(METHODS) - {'bootstrap'})) @pytest.mark.parametrize('normalization', NORMALIZATIONS) @pytest.mark.parametrize('use_errs', [True, False]) @pytest.mark.parametrize('N', [10, 100, 1000]) def test_inverses(method, normalization, use_errs, N, T=5, fmax=5): if not HAS_SCIPY and method in ['baluev', 'davies']: pytest.skip("SciPy required") t, y, dy = data(N, rseed=543) if not use_errs: dy = None method_kwds = METHOD_KWDS.get(method, None) fap = np.logspace(-10, 0, 10) ls = LombScargle(t, y, dy, normalization=normalization) z = ls.false_alarm_level(fap, maximum_frequency=fmax, 
method=method, method_kwds=method_kwds) fap_out = ls.false_alarm_probability(z, maximum_frequency=fmax, method=method, method_kwds=method_kwds) assert_allclose(fap, fap_out) @pytest.mark.parametrize('method', sorted(METHODS)) @pytest.mark.parametrize('normalization', NORMALIZATIONS) def test_false_alarm_smoketest(method, normalization, data): if not HAS_SCIPY and method in ['baluev', 'davies']: pytest.skip("SciPy required") kwds = METHOD_KWDS.get(method, None) t, y, dy = data fmax = 5 ls = LombScargle(t, y, dy, normalization=normalization) freq, power = ls.autopower(maximum_frequency=fmax) Z = np.linspace(power.min(), power.max(), 30) fap = ls.false_alarm_probability(Z, maximum_frequency=fmax, method=method, method_kwds=kwds) assert len(fap) == len(Z) if method != 'davies': assert np.all(fap <= 1) assert np.all(fap[:-1] >= fap[1:]) # monotonically decreasing @pytest.mark.parametrize('method', sorted(METHODS)) @pytest.mark.parametrize('use_errs', [True, False]) @pytest.mark.parametrize('normalization', sorted(set(NORMALIZATIONS) - {'psd'})) def test_false_alarm_equivalence(method, normalization, use_errs, data): # Note: the PSD normalization is not equivalent to the others, in that it # depends on the absolute errors rather than relative errors. Because the # scaling contributes to the distribution, it cannot be converted directly # from any of the three normalized versions. if not HAS_SCIPY and method in ['baluev', 'davies']: pytest.skip("SciPy required") kwds = METHOD_KWDS.get(method, None) t, y, dy = data if not use_errs: dy = None fmax = 5 ls = LombScargle(t, y, dy, normalization=normalization) freq, power = ls.autopower(maximum_frequency=fmax) Z = np.linspace(power.min(), power.max(), 30) fap = ls.false_alarm_probability(Z, maximum_frequency=fmax, method=method, method_kwds=kwds) # Compute the equivalent Z values in the standard normalization # and check that the FAP is consistent Z_std = convert_normalization(Z, len(t), from_normalization=normalization, to_normalization='standard', chi2_ref=compute_chi2_ref(y, dy)) ls = LombScargle(t, y, dy, normalization='standard') fap_std = ls.false_alarm_probability(Z_std, maximum_frequency=fmax, method=method, method_kwds=kwds) assert_allclose(fap, fap_std, rtol=0.1)
import numpy as np
import pytest
from numpy.testing import assert_allclose

from ..utils import convert_normalization, compute_chi2_ref
from ..core import LombScargle

NORMALIZATIONS = ['standard', 'model', 'log', 'psd']


@pytest.fixture
def data(N=100, period=1, theta=[10, 2, 3], dy=1, rseed=0):
    """Generate some data for testing"""
    rng = np.random.RandomState(rseed)
    t = 5 * period * rng.rand(N)
    omega = 2 * np.pi / period
    y = theta[0] + theta[1] * np.sin(omega * t) + theta[2] * np.cos(omega * t)
    dy = dy * (0.5 + rng.rand(N))
    y += dy * rng.randn(N)
    return t, y, dy


@pytest.mark.parametrize('norm_in', NORMALIZATIONS)
@pytest.mark.parametrize('norm_out', NORMALIZATIONS)
def test_convert_normalization(norm_in, norm_out, data):
    t, y, dy = data
    _, power_in = LombScargle(t, y, dy).autopower(maximum_frequency=5,
                                                  normalization=norm_in)
    _, power_out = LombScargle(t, y, dy).autopower(maximum_frequency=5,
                                                   normalization=norm_out)
    power_in_converted = convert_normalization(power_in, N=len(t),
                                               from_normalization=norm_in,
                                               to_normalization=norm_out,
                                               chi2_ref=compute_chi2_ref(y, dy))
    assert_allclose(power_in_converted, power_out)
import pytest
import numpy as np
from numpy.testing import assert_allclose

from .... import units
from ....tests.helper import assert_quantity_allclose
from .. import LombScargle

ALL_METHODS = LombScargle.available_methods
ALL_METHODS_NO_AUTO = [method for method in ALL_METHODS
                       if method != 'auto']
FAST_METHODS = [method for method in ALL_METHODS
                if 'fast' in method]
NTERMS_METHODS = [method for method in ALL_METHODS
                  if 'chi2' in method]
NORMALIZATIONS = ['standard', 'psd', 'log', 'model']


@pytest.fixture
def data(N=100, period=1, theta=[10, 2, 3], dy=1, rseed=0):
    """Generate some data for testing"""
    rng = np.random.RandomState(rseed)
    t = 20 * period * rng.rand(N)
    omega = 2 * np.pi / period
    y = theta[0] + theta[1] * np.sin(omega * t) + theta[2] * np.cos(omega * t)
    dy = dy * (0.5 + rng.rand(N))
    y += dy * rng.randn(N)
    return t, y, dy


@pytest.mark.parametrize('minimum_frequency', [None, 1.0])
@pytest.mark.parametrize('maximum_frequency', [None, 5.0])
@pytest.mark.parametrize('nyquist_factor', [1, 10])
@pytest.mark.parametrize('samples_per_peak', [1, 5])
def test_autofrequency(data, minimum_frequency, maximum_frequency,
                       nyquist_factor, samples_per_peak):
    t, y, dy = data
    baseline = t.max() - t.min()

    freq = LombScargle(t, y, dy).autofrequency(samples_per_peak,
                                               nyquist_factor,
                                               minimum_frequency,
                                               maximum_frequency)
    df = freq[1] - freq[0]

    # Check sample spacing
    assert_allclose(df, 1. / baseline / samples_per_peak)

    # Check minimum frequency
    if minimum_frequency is None:
        assert_allclose(freq[0], 0.5 * df)
    else:
        assert_allclose(freq[0], minimum_frequency)

    if maximum_frequency is None:
        avg_nyquist = 0.5 * len(t) / baseline
        assert_allclose(freq[-1], avg_nyquist * nyquist_factor, atol=0.5 * df)
    else:
        assert_allclose(freq[-1], maximum_frequency, atol=0.5 * df)


@pytest.mark.parametrize('method', ALL_METHODS_NO_AUTO)
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('with_errors', [True, False])
@pytest.mark.parametrize('with_units', [True, False])
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
def test_all_methods(data, method, center_data, fit_mean,
                     with_errors, with_units, normalization):
    if method == 'scipy' and (fit_mean or with_errors):
        return

    t, y, dy = data
    frequency = 0.8 + 0.01 * np.arange(40)
    if with_units:
        t = t * units.day
        y = y * units.mag
        dy = dy * units.mag
        frequency = frequency / t.unit
    if not with_errors:
        dy = None

    kwds = {}

    ls = LombScargle(t, y, dy, center_data=center_data,
                     fit_mean=fit_mean, normalization=normalization)
    P_expected = ls.power(frequency)

    # don't use the fft approximation here; we'll test this elsewhere
    if method in FAST_METHODS:
        kwds['method_kwds'] = dict(use_fft=False)
    P_method = ls.power(frequency, method=method, **kwds)

    if with_units:
        if normalization == 'psd' and not with_errors:
            assert P_method.unit == y.unit ** 2
        else:
            assert P_method.unit == units.dimensionless_unscaled
    else:
        assert not hasattr(P_method, 'unit')

    assert_quantity_allclose(P_expected, P_method)


@pytest.mark.parametrize('method', ALL_METHODS_NO_AUTO)
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('with_errors', [True, False])
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
def test_integer_inputs(data, method, center_data, fit_mean,
                        with_errors, normalization):
    if method == 'scipy' and (fit_mean or with_errors):
        return

    t, y, dy = data

    t = np.floor(100 * t)
    t_int = t.astype(int)

    y = np.floor(100 * y)
    y_int = y.astype(int)

    dy = np.floor(100 * dy)
    dy_int = dy.astype(int)

    frequency = 1E-2 * (0.8 + 0.01 * np.arange(40))

    if not with_errors:
        dy = None
        dy_int = None

    kwds = dict(center_data=center_data, fit_mean=fit_mean,
                normalization=normalization)
    P_float = LombScargle(t, y, dy, **kwds).power(frequency, method=method)
    P_int = LombScargle(t_int, y_int, dy_int, **kwds).power(frequency,
                                                            method=method)
    assert_allclose(P_float, P_int)


@pytest.mark.parametrize('method', NTERMS_METHODS)
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('with_errors', [True, False])
@pytest.mark.parametrize('nterms', [0, 2, 4])
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
def test_nterms_methods(method, center_data, fit_mean, with_errors,
                        nterms, normalization, data):
    t, y, dy = data
    frequency = 0.8 + 0.01 * np.arange(40)
    if not with_errors:
        dy = None

    ls = LombScargle(t, y, dy, center_data=center_data,
                     fit_mean=fit_mean, nterms=nterms,
                     normalization=normalization)

    if nterms == 0 and not fit_mean:
        with pytest.raises(ValueError) as err:
            ls.power(frequency, method=method)
        assert 'nterms' in str(err.value) and 'bias' in str(err.value)
    else:
        P_expected = ls.power(frequency)

        # don't use fast fft approximations here
        kwds = {}
        if 'fast' in method:
            kwds['method_kwds'] = dict(use_fft=False)
        P_method = ls.power(frequency, method=method, **kwds)

        assert_allclose(P_expected, P_method, rtol=1E-7, atol=1E-25)


@pytest.mark.parametrize('method', FAST_METHODS)
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('with_errors', [True, False])
@pytest.mark.parametrize('nterms', [0, 1, 2])
def test_fast_approximations(method, center_data, fit_mean,
                             with_errors, nterms, data):
    t, y, dy = data
    frequency = 0.8 + 0.01 * np.arange(40)
    if not with_errors:
        dy = None

    ls = LombScargle(t, y, dy, center_data=center_data,
                     fit_mean=fit_mean, nterms=nterms,
                     normalization='standard')

    # use only standard normalization because we compare via absolute tolerance
    kwds = dict(method=method)

    if method == 'fast' and nterms != 1:
        with pytest.raises(ValueError) as err:
            ls.power(frequency, **kwds)
        assert 'nterms' in str(err.value)

    elif nterms == 0 and not fit_mean:
        with pytest.raises(ValueError) as err:
            ls.power(frequency, **kwds)
        assert 'nterms' in str(err.value) and 'bias' in str(err.value)

    else:
        P_fast = ls.power(frequency, **kwds)
        kwds['method_kwds'] = dict(use_fft=False)
        P_slow = ls.power(frequency, **kwds)

        assert_allclose(P_fast, P_slow, atol=0.008)


@pytest.mark.parametrize('method', ALL_METHODS)
@pytest.mark.parametrize('shape', [(), (1,), (2,), (3,), (2, 3)])
def test_output_shapes(method, shape, data):
    t, y, dy = data
    freq = np.asarray(np.zeros(shape))
    freq.flat = np.arange(1, freq.size + 1)
    PLS = LombScargle(t, y, fit_mean=False).power(freq, method=method)
    assert PLS.shape == shape


@pytest.mark.parametrize('method', ALL_METHODS)
def test_errors_on_unit_mismatch(method, data):
    t, y, dy = data

    t = t * units.second
    y = y * units.mag
    frequency = np.linspace(0.5, 1.5, 10)

    # this should fail because frequency and 1/t units do not match
    with pytest.raises(ValueError) as err:
        LombScargle(t, y, fit_mean=False).power(frequency, method=method)
    assert str(err.value).startswith('Units of frequency not equivalent')

    # this should fail because dy and y units do not match
    with pytest.raises(ValueError) as err:
        LombScargle(t, y, dy, fit_mean=False).power(frequency / t.unit)
    assert str(err.value).startswith('Units of dy not equivalent')


# we don't test all normalizations here because they are tested above
# only test method='auto' because unit handling does not depend on method
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('normalization', ['standard', 'psd'])
@pytest.mark.parametrize('with_error', [True, False])
def test_unit_conversions(data, fit_mean, center_data, normalization,
                          with_error):
    t, y, dy = data

    t_day = t * units.day
    t_hour = units.Quantity(t_day, 'hour')

    y_meter = y * units.meter
    y_millimeter = units.Quantity(y_meter, 'millimeter')

    # sanity check on inputs
    assert_quantity_allclose(t_day, t_hour)
    assert_quantity_allclose(y_meter, y_millimeter)

    if with_error:
        dy = dy * units.meter
    else:
        dy = None

    kwds = dict(fit_mean=fit_mean, center_data=center_data,
                normalization=normalization)

    freq_day, P1 = LombScargle(t_day, y_meter, dy, **kwds).autopower()
    freq_hour, P2 = LombScargle(t_hour, y_millimeter, dy, **kwds).autopower()

    # Check units of frequency
    assert freq_day.unit == 1. / units.day
    assert freq_hour.unit == 1. / units.hour

    # Check that results match
    assert_quantity_allclose(freq_day, freq_hour)
    assert_quantity_allclose(P1, P2)

    # Check that switching frequency units doesn't change things
    P3 = LombScargle(t_day, y_meter, dy, **kwds).power(freq_hour)
    P4 = LombScargle(t_hour, y_meter, dy, **kwds).power(freq_day)
    assert_quantity_allclose(P3, P4)


@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('with_units', [True, False])
@pytest.mark.parametrize('freq', [1.0, 2.0])
def test_model(fit_mean, with_units, freq):
    rand = np.random.RandomState(0)
    t = 10 * rand.rand(40)
    params = 10 * rand.rand(3)

    y = np.zeros_like(t)
    if fit_mean:
        y += params[0]
    y += params[1] * np.sin(2 * np.pi * freq * (t - params[2]))

    if with_units:
        t = t * units.day
        y = y * units.mag
        freq = freq / units.day

    ls = LombScargle(t, y, center_data=False, fit_mean=fit_mean)
    y_fit = ls.model(t, freq)
    assert_quantity_allclose(y_fit, y)


@pytest.mark.parametrize('t_unit', [units.second, units.day])
@pytest.mark.parametrize('frequency_unit', [units.Hz, 1. / units.second])
@pytest.mark.parametrize('y_unit', [units.mag, units.jansky])
def test_model_units_match(data, t_unit, frequency_unit, y_unit):
    t, y, dy = data
    t_fit = t[:5]
    frequency = 1.0

    t = t * t_unit
    t_fit = t_fit * t_unit
    y = y * y_unit
    dy = dy * y_unit
    frequency = frequency * frequency_unit

    ls = LombScargle(t, y, dy)
    y_fit = ls.model(t_fit, frequency)
    assert y_fit.unit == y_unit


def test_model_units_mismatch(data):
    t, y, dy = data
    frequency = 1.0
    t_fit = t[:5]

    t = t * units.second
    t_fit = t_fit * units.second
    y = y * units.mag
    frequency = 1.0 / t.unit

    # this should fail because frequency and 1/t units do not match
    with pytest.raises(ValueError) as err:
        LombScargle(t, y).model(t_fit, frequency=1.0)
    assert str(err.value).startswith('Units of frequency not equivalent')

    # this should fail because t and t_fit units do not match
    with pytest.raises(ValueError) as err:
        LombScargle(t, y).model([1, 2], frequency)
    assert str(err.value).startswith('Units of t not equivalent')

    # this should fail because dy and y units do not match
    with pytest.raises(ValueError) as err:
        LombScargle(t, y, dy).model(t_fit, frequency)
    assert str(err.value).startswith('Units of dy not equivalent')


def test_autopower(data):
    t, y, dy = data
    ls = LombScargle(t, y, dy)
    kwargs = dict(samples_per_peak=6, nyquist_factor=2,
                  minimum_frequency=2, maximum_frequency=None)
    freq1 = ls.autofrequency(**kwargs)
    power1 = ls.power(freq1)
    freq2, power2 = ls.autopower(**kwargs)

    assert_allclose(freq1, freq2)
    assert_allclose(power1, power2)
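# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite above): a minimal
# end-to-end use of the LombScargle API that these tests exercise, assuming
# the ``astropy.stats`` import location current at the time of writing.
# The ``*_demo`` names are hypothetical demo variables.
if __name__ == '__main__':
    import numpy as np
    from astropy.stats import LombScargle

    rng_demo = np.random.RandomState(42)
    t_demo = 20 * rng_demo.rand(100)      # irregularly sampled times
    y_demo = 3 + 2 * np.sin(2 * np.pi * t_demo) + 0.5 * rng_demo.randn(100)

    # autopower() chooses a frequency grid from the data baseline and returns
    # the periodogram; the peak should land near the injected frequency of
    # 1 cycle per unit time.
    freq_demo, power_demo = LombScargle(t_demo, y_demo).autopower()
    print('best frequency:', freq_demo[np.argmax(power_demo)])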
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal

from ..utils import extirpolate, bitceil, trig_sum


@pytest.mark.parametrize('N', 2 ** np.arange(1, 12))
@pytest.mark.parametrize('offset', [-1, 0, 1])
def test_bitceil(N, offset):
    assert_equal(bitceil(N + offset),
                 int(2 ** np.ceil(np.log2(N + offset))))


@pytest.fixture
def extirpolate_data():
    rng = np.random.RandomState(0)
    x = 100 * rng.rand(50)
    y = np.sin(x)
    f = lambda x: np.sin(x / 10)
    return x, y, f


@pytest.mark.parametrize('N', [100, None])
@pytest.mark.parametrize('M', [5])
def test_extirpolate(N, M, extirpolate_data):
    x, y, f = extirpolate_data
    y_hat = extirpolate(x, y, N, M)
    x_hat = np.arange(len(y_hat))
    assert_allclose(np.dot(f(x), y), np.dot(f(x_hat), y_hat))


@pytest.fixture
def extirpolate_int_data():
    rng = np.random.RandomState(0)
    x = 100 * rng.rand(50)
    x[:25] = x[:25].astype(int)
    y = np.sin(x)
    f = lambda x: np.sin(x / 10)
    return x, y, f


@pytest.mark.parametrize('N', [100, None])
@pytest.mark.parametrize('M', [5])
def test_extirpolate_with_integers(N, M, extirpolate_int_data):
    x, y, f = extirpolate_int_data
    y_hat = extirpolate(x, y, N, M)
    x_hat = np.arange(len(y_hat))
    assert_allclose(np.dot(f(x), y), np.dot(f(x_hat), y_hat))


@pytest.fixture
def trig_sum_data():
    rng = np.random.RandomState(0)
    t = 10 * rng.rand(50)
    h = np.sin(t)
    return t, h


@pytest.mark.parametrize('f0', [0, 1])
@pytest.mark.parametrize('adjust_t', [True, False])
@pytest.mark.parametrize('freq_factor', [1, 2])
@pytest.mark.parametrize('df', [0.1])
def test_trig_sum(f0, adjust_t, freq_factor, df, trig_sum_data):
    t, h = trig_sum_data
    tfit = t - t.min() if adjust_t else t
    S1, C1 = trig_sum(tfit, h, df, N=1000, use_fft=True,
                      f0=f0, freq_factor=freq_factor, oversampling=10)
    S2, C2 = trig_sum(tfit, h, df, N=1000, use_fft=False,
                      f0=f0, freq_factor=freq_factor, oversampling=10)
    assert_allclose(S1, S2, atol=1E-2)
    assert_allclose(C1, C2, atol=1E-2)
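# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite above): the invariant that
# test_extirpolate checks, written out directly.  The import path below is
# the internal astropy layout assumed at the time of writing; extirpolate is
# a private helper, so treat this as a sketch rather than supported API.
if __name__ == '__main__':
    import numpy as np
    from astropy.stats.lombscargle.implementations.utils import extirpolate

    rng = np.random.RandomState(0)
    x = 100 * rng.rand(50)   # irregular sample positions
    y = np.sin(x)            # weights attached to those positions

    # Extirpolation redistributes the weights y onto an integer grid so that
    # sums of smooth functions are approximately preserved:
    #     sum(f(x) * y)  ~=  sum(f(grid) * y_hat)
    y_hat = extirpolate(x, y, N=100, M=5)
    grid = np.arange(len(y_hat))

    def f(z):
        return np.sin(z / 10)

    print(np.dot(f(x), y), '~=', np.dot(f(grid), y_hat))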
import pytest
import numpy as np
from numpy.testing import assert_allclose

from ..mle import design_matrix, periodic_fit


@pytest.fixture
def t():
    rand = np.random.RandomState(42)
    return 10 * rand.rand(10)


@pytest.mark.parametrize('freq', [1.0, 2])
@pytest.mark.parametrize('dy', [None, 2.0])
@pytest.mark.parametrize('bias', [True, False])
def test_design_matrix(t, freq, dy, bias):
    X = design_matrix(t, freq, dy, bias=bias)
    assert X.shape == (t.shape[0], 2 + bool(bias))
    if bias:
        assert_allclose(X[:, 0], 1. / (dy or 1.0))
    assert_allclose(X[:, -2], np.sin(2 * np.pi * freq * t) / (dy or 1.0))
    assert_allclose(X[:, -1], np.cos(2 * np.pi * freq * t) / (dy or 1.0))


@pytest.mark.parametrize('nterms', range(4))
def test_multiterm_design_matrix(t, nterms):
    dy = 2.0
    freq = 1.5
    X = design_matrix(t, freq, dy=dy, bias=True, nterms=nterms)
    assert X.shape == (t.shape[0], 1 + 2 * nterms)
    assert_allclose(X[:, 0], 1. / dy)
    for i in range(1, nterms + 1):
        assert_allclose(X[:, 2 * i - 1], np.sin(2 * np.pi * i * freq * t) / dy)
        assert_allclose(X[:, 2 * i], np.cos(2 * np.pi * i * freq * t) / dy)


@pytest.mark.parametrize('nterms', range(1, 4))
@pytest.mark.parametrize('freq', [1, 2])
@pytest.mark.parametrize('fit_mean', [True, False])
def test_exact_mle_fit(nterms, freq, fit_mean):
    rand = np.random.RandomState(42)
    t = 10 * rand.rand(30)
    theta = -1 + rand.rand(2 * nterms + 1)

    y = np.zeros(t.shape)
    if fit_mean:
        y = theta[0] * np.ones(t.shape)
    for i in range(1, nterms + 1):
        y += theta[2 * i - 1] * np.sin(2 * np.pi * i * freq * t)
        y += theta[2 * i] * np.cos(2 * np.pi * i * freq * t)

    y_fit = periodic_fit(t, y, dy=1, frequency=freq, t_fit=t, nterms=nterms,
                         center_data=False, fit_mean=fit_mean)
    assert_allclose(y, y_fit)
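# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite above): the column layout
# that test_design_matrix asserts, rebuilt with plain numpy instead of the
# private ``design_matrix`` helper.  For a single-term model with a bias
# column, X is [1, sin(2*pi*f*t), cos(2*pi*f*t)], each divided by dy.
if __name__ == '__main__':
    import numpy as np

    rand = np.random.RandomState(42)
    t = 10 * rand.rand(10)
    freq, dy = 1.5, 2.0

    X = np.column_stack([np.ones_like(t),
                         np.sin(2 * np.pi * freq * t),
                         np.cos(2 * np.pi * freq * t)]) / dy
    print(X.shape)   # (10, 3): matches the (len(t), 2 + bias) assertion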
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import pytest
import numpy as np

from ..sorted_array import SortedArray
from ..table import Table


@pytest.fixture
def array():
    # composite index
    col0 = np.array([x % 2 for x in range(1, 11)])
    col1 = np.array([x for x in range(1, 11)])
    t = Table([col0, col1])
    t = t[t.argsort()]
    return SortedArray(t, t['col1'].copy())


@pytest.fixture
def wide_array():
    # array with 100 columns
    t = Table([[x] * 10 for x in np.arange(100)])
    return SortedArray(t, t['col0'].copy())


def test_array_find(array):
    for i in range(1, 11):
        print("Searching for {0}".format(i))
        assert array.find((i % 2, i)) == [i]
    assert array.find((1, 4)) == []


def test_array_range(array):
    assert np.all(array.range((0, 8), (1, 3), (True, True)) == [8, 10, 1, 3])
    assert np.all(array.range((0, 8), (1, 3), (False, True)) == [10, 1, 3])
    assert np.all(array.range((0, 8), (1, 3), (True, False)) == [8, 10, 1])


def test_wide_array(wide_array):
    # checks for a previous bug in which the length of a
    # sliced SortedArray was set to the number of columns
    # instead of the number of elements in each column
    first_row = wide_array[:1].data
    assert np.all(first_row == Table([[x] for x in np.arange(100)]))
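# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite above): SortedArray is one
# of the engines behind Table indexing, so the public-API counterpart of
# these tests is Table.add_index with engine=SortedArray.  A sketch assuming
# the astropy.table layout at the time of writing; default column names
# ('col0', 'col1') apply.
if __name__ == '__main__':
    import numpy as np
    from astropy.table import Table
    from astropy.table.sorted_array import SortedArray

    t = Table([np.arange(1, 11) % 2, np.arange(1, 11)])
    t.add_index('col1', engine=SortedArray)

    # .loc performs value-based row lookup through the sorted index
    print(t.loc[4])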
import numpy as np

from .. import np_utils


def test_common_dtype():
    """
    Test that allowed combinations are those expected.
    """
    dtype = [(str('int'), int),
             (str('uint8'), np.uint8),
             (str('float32'), np.float32),
             (str('float64'), np.float64),
             (str('str'), 'S2'),
             (str('uni'), 'U2'),
             (str('bool'), bool),
             (str('object'), np.object_)]

    arr = np.empty(1, dtype=dtype)
    fail = set()
    succeed = set()
    for name1, type1 in dtype:
        for name2, type2 in dtype:
            try:
                np_utils.common_dtype([arr[name1], arr[name2]])
                succeed.add('{0} {1}'.format(name1, name2))
            except np_utils.TableMergeError:
                fail.add('{0} {1}'.format(name1, name2))

    # known bad combinations
    bad = set(['str int', 'str bool', 'uint8 bool', 'uint8 str',
               'object float32', 'bool object', 'uni uint8', 'int str',
               'bool str', 'bool float64', 'bool uni', 'str float32',
               'uni float64', 'uni object', 'bool uint8', 'object float64',
               'float32 bool', 'str uint8', 'uni bool', 'float64 bool',
               'float64 object', 'int bool', 'uni int', 'uint8 object',
               'int uni', 'uint8 uni', 'float32 uni', 'object uni',
               'bool float32', 'uni float32', 'object str', 'int object',
               'str float64', 'object int', 'float64 uni', 'bool int',
               'object bool', 'object uint8', 'float32 object', 'str object',
               'float64 str', 'float32 str'])
    assert fail == bad

    good = set(['float64 int', 'int int', 'uint8 float64', 'uint8 int',
                'str uni', 'float32 float32', 'float64 float64',
                'float64 uint8', 'float64 float32', 'int uint8',
                'int float32', 'uni str', 'int float64', 'uint8 float32',
                'float32 int', 'float32 uint8', 'bool bool', 'uint8 uint8',
                'str str', 'float32 float64', 'object object', 'uni uni'])
    assert succeed == good
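# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite above): the promotion
# behaviour that common_dtype builds on can be previewed with numpy's public
# np.promote_types.  astropy layers its own rules on top (for example
# bytes/unicode merging), so this shows only the numpy baseline, not the
# exact bad/good sets asserted above.
if __name__ == '__main__':
    import numpy as np

    # compatible numeric kinds widen to a common type
    print(np.promote_types(np.uint8, np.float64))    # float64

    # mixing bytes with integers has no common type and raises
    try:
        np.promote_types('S2', np.int64)
    except TypeError as exc:
        print('no common dtype:', exc)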
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst import gc import sys import copy from io import StringIO from collections import OrderedDict import pytest import numpy as np from numpy.testing import assert_allclose from ...io import fits from ...tests.helper import (assert_follows_unicode_guidelines, ignore_warnings, catch_warnings) from ...utils.data import get_pkg_data_filename from ... import table from ... import units as u from .conftest import MaskedTable try: with ignore_warnings(DeprecationWarning): # Ignore DeprecationWarning on pandas import in Python 3.5--see # https://github.com/astropy/astropy/issues/4380 import pandas # pylint: disable=W0611 except ImportError: HAS_PANDAS = False else: HAS_PANDAS = True class SetupData: def _setup(self, table_types): self._table_type = table_types.Table self._column_type = table_types.Column @property def a(self): if self._column_type is not None: if not hasattr(self, '_a'): self._a = self._column_type( [1, 2, 3], name='a', format='%d', meta={'aa': [0, 1, 2, 3, 4]}) return self._a @property def b(self): if self._column_type is not None: if not hasattr(self, '_b'): self._b = self._column_type( [4, 5, 6], name='b', format='%d', meta={'aa': 1}) return self._b @property def c(self): if self._column_type is not None: if not hasattr(self, '_c'): self._c = self._column_type([7, 8, 9], 'c') return self._c @property def d(self): if self._column_type is not None: if not hasattr(self, '_d'): self._d = self._column_type([7, 8, 7], 'd') return self._d @property def obj(self): if self._column_type is not None: if not hasattr(self, '_obj'): self._obj = self._column_type([1, 'string', 3], 'obj', dtype='O') return self._obj @property def t(self): if self._table_type is not None: if not hasattr(self, '_t'): self._t = self._table_type([self.a, self.b]) return self._t @pytest.mark.usefixtures('table_types') class TestSetTableColumn(SetupData): def test_set_row(self, table_types): """Set a row from a tuple of values""" self._setup(table_types) t = table_types.Table([self.a, self.b]) t[1] = (20, 21) assert t['a'][0] == 1 assert t['a'][1] == 20 assert t['a'][2] == 3 assert t['b'][0] == 4 assert t['b'][1] == 21 assert t['b'][2] == 6 def test_set_row_existing(self, table_types): """Set a row from another existing row""" self._setup(table_types) t = table_types.Table([self.a, self.b]) t[0] = t[1] assert t[0][0] == 2 assert t[0][1] == 5 def test_set_row_fail_1(self, table_types): """Set a row from an incorrectly-sized or typed set of values""" self._setup(table_types) t = table_types.Table([self.a, self.b]) with pytest.raises(ValueError): t[1] = (20, 21, 22) with pytest.raises(ValueError): t[1] = 0 def test_set_row_fail_2(self, table_types): """Set a row from an incorrectly-typed tuple of values""" self._setup(table_types) t = table_types.Table([self.a, self.b]) with pytest.raises(ValueError): t[1] = ('abc', 'def') def test_set_new_col_new_table(self, table_types): """Create a new column in empty table using the item access syntax""" self._setup(table_types) t = table_types.Table() t['aa'] = self.a # Test that the new column name is 'aa' and that the values match assert np.all(t['aa'] == self.a) assert t.colnames == ['aa'] def test_set_new_col_new_table_quantity(self, table_types): """Create a new column (from a quantity) in empty table using the item access syntax""" self._setup(table_types) t = table_types.Table() t['aa'] = np.array([1, 2, 3]) * u.m assert np.all(t['aa'] == np.array([1, 2, 3])) assert t['aa'].unit == u.m 
t['bb'] = 3 * u.m assert np.all(t['bb'] == 3) assert t['bb'].unit == u.m def test_set_new_col_existing_table(self, table_types): """Create a new column in an existing table using the item access syntax""" self._setup(table_types) t = table_types.Table([self.a]) # Add a column t['bb'] = self.b assert np.all(t['bb'] == self.b) assert t.colnames == ['a', 'bb'] assert t['bb'].meta == self.b.meta assert t['bb'].format == self.b.format # Add another column t['c'] = t['a'] assert np.all(t['c'] == t['a']) assert t.colnames == ['a', 'bb', 'c'] assert t['c'].meta == t['a'].meta assert t['c'].format == t['a'].format # Add a multi-dimensional column t['d'] = table_types.Column(np.arange(12).reshape(3, 2, 2)) assert t['d'].shape == (3, 2, 2) assert t['d'][0, 0, 1] == 1 # Add column from a list t['e'] = ['hello', 'the', 'world'] assert np.all(t['e'] == np.array(['hello', 'the', 'world'])) # Make sure setting existing column still works t['e'] = ['world', 'hello', 'the'] assert np.all(t['e'] == np.array(['world', 'hello', 'the'])) # Add a column via broadcasting t['f'] = 10 assert np.all(t['f'] == 10) # Add a column from a Quantity t['g'] = np.array([1, 2, 3]) * u.m assert np.all(t['g'].data == np.array([1, 2, 3])) assert t['g'].unit == u.m # Add a column from a (scalar) Quantity t['g'] = 3 * u.m assert np.all(t['g'].data == 3) assert t['g'].unit == u.m def test_set_new_unmasked_col_existing_table(self, table_types): """Create a new column in an existing table using the item access syntax""" self._setup(table_types) t = table_types.Table([self.a]) # masked or unmasked b = table.Column(name='b', data=[1, 2, 3]) # unmasked t['b'] = b assert np.all(t['b'] == b) def test_set_new_masked_col_existing_table(self, table_types): """Create a new column in an existing table using the item access syntax""" self._setup(table_types) t = table_types.Table([self.a]) # masked or unmasked b = table.MaskedColumn(name='b', data=[1, 2, 3]) # masked t['b'] = b assert np.all(t['b'] == b) def test_set_new_col_existing_table_fail(self, table_types): """Generate failure when creating a new column using the item access syntax""" self._setup(table_types) t = table_types.Table([self.a]) # Wrong size with pytest.raises(ValueError): t['b'] = [1, 2] @pytest.mark.usefixtures('table_types') class TestEmptyData(): def test_1(self, table_types): t = table_types.Table() t.add_column(table_types.Column(name='a', dtype=int, length=100)) assert len(t['a']) == 100 def test_2(self, table_types): t = table_types.Table() t.add_column(table_types.Column(name='a', dtype=int, shape=(3, ), length=100)) assert len(t['a']) == 100 def test_3(self, table_types): t = table_types.Table() # length is not given t.add_column(table_types.Column(name='a', dtype=int)) assert len(t['a']) == 0 def test_4(self, table_types): t = table_types.Table() # length is not given t.add_column(table_types.Column(name='a', dtype=int, shape=(3, 4))) assert len(t['a']) == 0 def test_5(self, table_types): t = table_types.Table() t.add_column(table_types.Column(name='a')) # dtype is not specified assert len(t['a']) == 0 def test_add_via_setitem_and_slice(self, table_types): """Test related to #3023 where a MaskedColumn is created with name=None and then gets changed to name='a'. 
After PR #2790 this test fails without the #3023 fix.""" t = table_types.Table() t['a'] = table_types.Column([1, 2, 3]) t2 = t[:] assert t2.colnames == t.colnames @pytest.mark.usefixtures('table_types') class TestNewFromColumns(): def test_simple(self, table_types): cols = [table_types.Column(name='a', data=[1, 2, 3]), table_types.Column(name='b', data=[4, 5, 6], dtype=np.float32)] t = table_types.Table(cols) assert np.all(t['a'].data == np.array([1, 2, 3])) assert np.all(t['b'].data == np.array([4, 5, 6], dtype=np.float32)) assert type(t['b'][1]) is np.float32 def test_from_np_array(self, table_types): cols = [table_types.Column(name='a', data=np.array([1, 2, 3], dtype=np.int64), dtype=np.float64), table_types.Column(name='b', data=np.array([4, 5, 6], dtype=np.float32))] t = table_types.Table(cols) assert np.all(t['a'] == np.array([1, 2, 3], dtype=np.float64)) assert np.all(t['b'] == np.array([4, 5, 6], dtype=np.float32)) assert type(t['a'][1]) is np.float64 assert type(t['b'][1]) is np.float32 def test_size_mismatch(self, table_types): cols = [table_types.Column(name='a', data=[1, 2, 3]), table_types.Column(name='b', data=[4, 5, 6, 7])] with pytest.raises(ValueError): table_types.Table(cols) def test_name_none(self, table_types): """Column with name=None can init a table whether or not names are supplied""" c = table_types.Column(data=[1, 2], name='c') d = table_types.Column(data=[3, 4]) t = table_types.Table([c, d], names=(None, 'd')) assert t.colnames == ['c', 'd'] t = table_types.Table([c, d]) assert t.colnames == ['c', 'col1'] @pytest.mark.usefixtures('table_types') class TestReverse(): def test_reverse(self, table_types): t = table_types.Table([[1, 2, 3], ['a', 'b', 'cc']]) t.reverse() assert np.all(t['col0'] == np.array([3, 2, 1])) assert np.all(t['col1'] == np.array(['cc', 'b', 'a'])) t2 = table_types.Table(t, copy=False) assert np.all(t2['col0'] == np.array([3, 2, 1])) assert np.all(t2['col1'] == np.array(['cc', 'b', 'a'])) t2 = table_types.Table(t, copy=True) assert np.all(t2['col0'] == np.array([3, 2, 1])) assert np.all(t2['col1'] == np.array(['cc', 'b', 'a'])) t2.sort('col0') assert np.all(t2['col0'] == np.array([1, 2, 3])) assert np.all(t2['col1'] == np.array(['a', 'b', 'cc'])) def test_reverse_big(self, table_types): x = np.arange(10000) y = x + 1 t = table_types.Table([x, y], names=('x', 'y')) t.reverse() assert np.all(t['x'] == x[::-1]) assert np.all(t['y'] == y[::-1]) @pytest.mark.usefixtures('table_types') class TestColumnAccess(): def test_1(self, table_types): t = table_types.Table() with pytest.raises(KeyError): t['a'] def test_2(self, table_types): t = table_types.Table() t.add_column(table_types.Column(name='a', data=[1, 2, 3])) assert np.all(t['a'] == np.array([1, 2, 3])) with pytest.raises(KeyError): t['b'] # column does not exist def test_itercols(self, table_types): names = ['a', 'b', 'c'] t = table_types.Table([[1], [2], [3]], names=names) for name, col in zip(names, t.itercols()): assert name == col.name assert isinstance(col, table_types.Column) @pytest.mark.usefixtures('table_types') class TestAddLength(SetupData): def test_right_length(self, table_types): self._setup(table_types) t = table_types.Table([self.a]) t.add_column(self.b) def test_too_long(self, table_types): self._setup(table_types) t = table_types.Table([self.a]) with pytest.raises(ValueError): t.add_column(table_types.Column(name='b', data=[4, 5, 6, 7])) # data too long def test_too_short(self, table_types): self._setup(table_types) t = table_types.Table([self.a]) with 
pytest.raises(ValueError): t.add_column(table_types.Column(name='b', data=[4, 5])) # data too short @pytest.mark.usefixtures('table_types') class TestAddPosition(SetupData): def test_1(self, table_types): self._setup(table_types) t = table_types.Table() t.add_column(self.a, 0) def test_2(self, table_types): self._setup(table_types) t = table_types.Table() t.add_column(self.a, 1) def test_3(self, table_types): self._setup(table_types) t = table_types.Table() t.add_column(self.a, -1) def test_5(self, table_types): self._setup(table_types) t = table_types.Table() with pytest.raises(ValueError): t.index_column('b') def test_6(self, table_types): self._setup(table_types) t = table_types.Table() t.add_column(self.a) t.add_column(self.b) assert t.columns.keys() == ['a', 'b'] def test_7(self, table_types): self._setup(table_types) t = table_types.Table([self.a]) t.add_column(self.b, t.index_column('a')) assert t.columns.keys() == ['b', 'a'] def test_8(self, table_types): self._setup(table_types) t = table_types.Table([self.a]) t.add_column(self.b, t.index_column('a') + 1) assert t.columns.keys() == ['a', 'b'] def test_9(self, table_types): self._setup(table_types) t = table_types.Table() t.add_column(self.a) t.add_column(self.b, t.index_column('a') + 1) t.add_column(self.c, t.index_column('b')) assert t.columns.keys() == ['a', 'c', 'b'] def test_10(self, table_types): self._setup(table_types) t = table_types.Table() t.add_column(self.a) ia = t.index_column('a') t.add_column(self.b, ia + 1) t.add_column(self.c, ia) assert t.columns.keys() == ['c', 'a', 'b'] @pytest.mark.usefixtures('table_types') class TestAddName(SetupData): def test_override_name(self, table_types): self._setup(table_types) t = table_types.Table() # Check that we can override the name of the input column in the Table t.add_column(self.a, name='b') t.add_column(self.b, name='a') assert t.columns.keys() == ['b', 'a'] # Check that we did not change the name of the input column assert self.a.info.name == 'a' assert self.b.info.name == 'b' # Now test with an input column from another table t2 = table_types.Table() t2.add_column(t['a'], name='c') assert t2.columns.keys() == ['c'] # Check that we did not change the name of the input column assert t.columns.keys() == ['b', 'a'] # Check that we can give a name if none was present col = table_types.Column([1, 2, 3]) t.add_column(col, name='c') assert t.columns.keys() == ['b', 'a', 'c'] def test_default_name(self, table_types): t = table_types.Table() col = table_types.Column([1, 2, 3]) t.add_column(col) assert t.columns.keys() == ['col0'] @pytest.mark.usefixtures('table_types') class TestInitFromTable(SetupData): def test_from_table_cols(self, table_types): """Ensure that using cols from an existing table gives a clean copy. 
""" self._setup(table_types) t = self.t cols = t.columns # Construct Table with cols via Table._new_from_cols t2a = table_types.Table([cols['a'], cols['b'], self.c]) # Construct with add_column t2b = table_types.Table() t2b.add_column(cols['a']) t2b.add_column(cols['b']) t2b.add_column(self.c) t['a'][1] = 20 t['b'][1] = 21 for t2 in [t2a, t2b]: t2['a'][2] = 10 t2['b'][2] = 11 t2['c'][2] = 12 t2.columns['a'].meta['aa'][3] = 10 assert np.all(t['a'] == np.array([1, 20, 3])) assert np.all(t['b'] == np.array([4, 21, 6])) assert np.all(t2['a'] == np.array([1, 2, 10])) assert np.all(t2['b'] == np.array([4, 5, 11])) assert np.all(t2['c'] == np.array([7, 8, 12])) assert t2['a'].name == 'a' assert t2.columns['a'].meta['aa'][3] == 10 assert t.columns['a'].meta['aa'][3] == 3 @pytest.mark.usefixtures('table_types') class TestAddColumns(SetupData): def test_add_columns1(self, table_types): self._setup(table_types) t = table_types.Table() t.add_columns([self.a, self.b, self.c]) assert t.colnames == ['a', 'b', 'c'] def test_add_columns2(self, table_types): self._setup(table_types) t = table_types.Table([self.a, self.b]) t.add_columns([self.c, self.d]) assert t.colnames == ['a', 'b', 'c', 'd'] assert np.all(t['c'] == np.array([7, 8, 9])) def test_add_columns3(self, table_types): self._setup(table_types) t = table_types.Table([self.a, self.b]) t.add_columns([self.c, self.d], indexes=[1, 0]) assert t.colnames == ['d', 'a', 'c', 'b'] def test_add_columns4(self, table_types): self._setup(table_types) t = table_types.Table([self.a, self.b]) t.add_columns([self.c, self.d], indexes=[0, 0]) assert t.colnames == ['c', 'd', 'a', 'b'] def test_add_columns5(self, table_types): self._setup(table_types) t = table_types.Table([self.a, self.b]) t.add_columns([self.c, self.d], indexes=[2, 2]) assert t.colnames == ['a', 'b', 'c', 'd'] def test_add_columns6(self, table_types): """Check that we can override column names.""" self._setup(table_types) t = table_types.Table() t.add_columns([self.a, self.b, self.c], names=['b', 'c', 'a']) assert t.colnames == ['b', 'c', 'a'] def test_add_columns7(self, table_types): """Check that default names are used when appropriate.""" t = table_types.Table() col0 = table_types.Column([1, 2, 3]) col1 = table_types.Column([4, 5, 3]) t.add_columns([col0, col1]) assert t.colnames == ['col0', 'col1'] def test_add_duplicate_column(self, table_types): self._setup(table_types) t = table_types.Table() t.add_column(self.a) with pytest.raises(ValueError): t.add_column(table_types.Column(name='a', data=[0, 1, 2])) t.add_column(table_types.Column(name='a', data=[0, 1, 2]), rename_duplicate=True) t.add_column(self.b) t.add_column(self.c) assert t.colnames == ['a', 'a_1', 'b', 'c'] t.add_column(table_types.Column(name='a', data=[0, 1, 2]), rename_duplicate=True) assert t.colnames == ['a', 'a_1', 'b', 'c', 'a_2'] # test adding column from a separate Table t1 = table_types.Table() t1.add_column(self.a) with pytest.raises(ValueError): t.add_column(t1['a']) t.add_column(t1['a'], rename_duplicate=True) t1['a'][0] = 100 # Change original column assert t.colnames == ['a', 'a_1', 'b', 'c', 'a_2', 'a_3'] assert t1.colnames == ['a'] # Check new column didn't change (since name conflict forced a copy) assert t['a_3'][0] == self.a[0] def test_add_duplicate_columns(self, table_types): self._setup(table_types) t = table_types.Table([self.a, self.b, self.c]) with pytest.raises(ValueError): t.add_columns([table_types.Column(name='a', data=[0, 1, 2]), table_types.Column(name='b', data=[0, 1, 2])]) 
t.add_columns([table_types.Column(name='a', data=[0, 1, 2]), table_types.Column(name='b', data=[0, 1, 2])], rename_duplicate=True) t.add_column(self.d) assert t.colnames == ['a', 'b', 'c', 'a_1', 'b_1', 'd'] @pytest.mark.usefixtures('table_types') class TestAddRow(SetupData): @property def b(self): if self._column_type is not None: if not hasattr(self, '_b'): self._b = self._column_type(name='b', data=[4.0, 5.1, 6.2]) return self._b @property def c(self): if self._column_type is not None: if not hasattr(self, '_c'): self._c = self._column_type(name='c', data=['7', '8', '9']) return self._c @property def d(self): if self._column_type is not None: if not hasattr(self, '_d'): self._d = self._column_type(name='d', data=[[1, 2], [3, 4], [5, 6]]) return self._d @property def t(self): if self._table_type is not None: if not hasattr(self, '_t'): self._t = self._table_type([self.a, self.b, self.c]) return self._t def test_add_none_to_empty_table(self, table_types): self._setup(table_types) t = table_types.Table(names=('a', 'b', 'c'), dtype=('(2,)i', 'S4', 'O')) t.add_row() assert np.all(t['a'][0] == [0, 0]) assert t['b'][0] == '' assert t['c'][0] == 0 t.add_row() assert np.all(t['a'][1] == [0, 0]) assert t['b'][1] == '' assert t['c'][1] == 0 def test_add_stuff_to_empty_table(self, table_types): self._setup(table_types) t = table_types.Table(names=('a', 'b', 'obj'), dtype=('(2,)i', 'S8', 'O')) t.add_row([[1, 2], 'hello', 'world']) assert np.all(t['a'][0] == [1, 2]) assert t['b'][0] == 'hello' assert t['obj'][0] == 'world' # Make sure it is not repeating last row but instead # adding zeros (as documented) t.add_row() assert np.all(t['a'][1] == [0, 0]) assert t['b'][1] == '' assert t['obj'][1] == 0 def test_add_table_row(self, table_types): self._setup(table_types) t = self.t t['d'] = self.d t2 = table_types.Table([self.a, self.b, self.c, self.d]) t.add_row(t2[0]) assert len(t) == 4 assert np.all(t['a'] == np.array([1, 2, 3, 1])) assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 4.0])) assert np.all(t['c'] == np.array(['7', '8', '9', '7'])) assert np.all(t['d'] == np.array([[1, 2], [3, 4], [5, 6], [1, 2]])) def test_add_table_row_obj(self, table_types): self._setup(table_types) t = table_types.Table([self.a, self.b, self.obj]) t.add_row([1, 4.0, [10]]) assert len(t) == 4 assert np.all(t['a'] == np.array([1, 2, 3, 1])) assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 4.0])) assert np.all(t['obj'] == np.array([1, 'string', 3, [10]], dtype='O')) def test_add_qtable_row_multidimensional(self): q = [[1, 2], [3, 4]] * u.m qt = table.QTable([q]) qt.add_row(([5, 6] * u.km,)) assert np.all(qt['col0'] == [[1, 2], [3, 4], [5000, 6000]] * u.m) def test_add_with_tuple(self, table_types): self._setup(table_types) t = self.t t.add_row((4, 7.2, '1')) assert len(t) == 4 assert np.all(t['a'] == np.array([1, 2, 3, 4])) assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2])) assert np.all(t['c'] == np.array(['7', '8', '9', '1'])) def test_add_with_list(self, table_types): self._setup(table_types) t = self.t t.add_row([4, 7.2, '10']) assert len(t) == 4 assert np.all(t['a'] == np.array([1, 2, 3, 4])) assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2])) assert np.all(t['c'] == np.array(['7', '8', '9', '1'])) def test_add_with_dict(self, table_types): self._setup(table_types) t = self.t t.add_row({'a': 4, 'b': 7.2}) assert len(t) == 4 assert np.all(t['a'] == np.array([1, 2, 3, 4])) assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2])) if t.masked: assert np.all(t['c'] == np.array(['7', '8', '9', '7'])) 
else: assert np.all(t['c'] == np.array(['7', '8', '9', ''])) def test_add_with_none(self, table_types): self._setup(table_types) t = self.t t.add_row() assert len(t) == 4 assert np.all(t['a'].data == np.array([1, 2, 3, 0])) assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 0.0])) assert np.all(t['c'].data == np.array(['7', '8', '9', ''])) def test_add_missing_column(self, table_types): self._setup(table_types) t = self.t with pytest.raises(ValueError): t.add_row({'bad_column': 1}) def test_wrong_size_tuple(self, table_types): self._setup(table_types) t = self.t with pytest.raises(ValueError): t.add_row((1, 2)) def test_wrong_vals_type(self, table_types): self._setup(table_types) t = self.t with pytest.raises(TypeError): t.add_row(1) def test_add_row_failures(self, table_types): self._setup(table_types) t = self.t t_copy = table_types.Table(t, copy=True) # Wrong number of columns try: t.add_row([1, 2, 3, 4]) except ValueError: pass assert len(t) == 3 assert np.all(t.as_array() == t_copy.as_array()) # Wrong data type try: t.add_row(['one', 2, 3]) except ValueError: pass assert len(t) == 3 assert np.all(t.as_array() == t_copy.as_array()) def test_insert_table_row(self, table_types): """ Light testing of Table.insert_row() method. The deep testing is done via the add_row() tests which calls insert_row(index=len(self), ...), so here just test that the added index parameter is handled correctly. """ self._setup(table_types) row = (10, 40.0, 'x', [10, 20]) for index in range(-3, 4): indices = np.insert(np.arange(3), index, 3) t = table_types.Table([self.a, self.b, self.c, self.d]) t2 = t.copy() t.add_row(row) # By now we know this works t2.insert_row(index, row) for name in t.colnames: if t[name].dtype.kind == 'f': assert np.allclose(t[name][indices], t2[name]) else: assert np.all(t[name][indices] == t2[name]) for index in (-4, 4): t = table_types.Table([self.a, self.b, self.c, self.d]) with pytest.raises(IndexError): t.insert_row(index, row) @pytest.mark.usefixtures('table_types') class TestTableColumn(SetupData): def test_column_view(self, table_types): self._setup(table_types) t = self.t a = t.columns['a'] a[2] = 10 assert t['a'][2] == 10 @pytest.mark.usefixtures('table_types') class TestArrayColumns(SetupData): def test_1d(self, table_types): self._setup(table_types) b = table_types.Column(name='b', dtype=int, shape=(2, ), length=3) t = table_types.Table([self.a]) t.add_column(b) assert t['b'].shape == (3, 2) assert t['b'][0].shape == (2, ) def test_2d(self, table_types): self._setup(table_types) b = table_types.Column(name='b', dtype=int, shape=(2, 4), length=3) t = table_types.Table([self.a]) t.add_column(b) assert t['b'].shape == (3, 2, 4) assert t['b'][0].shape == (2, 4) def test_3d(self, table_types): self._setup(table_types) t = table_types.Table([self.a]) b = table_types.Column(name='b', dtype=int, shape=(2, 4, 6), length=3) t.add_column(b) assert t['b'].shape == (3, 2, 4, 6) assert t['b'][0].shape == (2, 4, 6) @pytest.mark.usefixtures('table_types') class TestRemove(SetupData): @property def t(self): if self._table_type is not None: if not hasattr(self, '_t'): self._t = self._table_type([self.a]) return self._t @property def t2(self): if self._table_type is not None: if not hasattr(self, '_t2'): self._t2 = self._table_type([self.a, self.b, self.c]) return self._t2 def test_1(self, table_types): self._setup(table_types) self.t.remove_columns('a') assert self.t.columns.keys() == [] assert self.t.as_array() is None def test_2(self, table_types): self._setup(table_types) 
self.t.add_column(self.b) self.t.remove_columns('a') assert self.t.columns.keys() == ['b'] assert self.t.dtype.names == ('b',) assert np.all(self.t['b'] == np.array([4, 5, 6])) def test_3(self, table_types): """Check remove_columns works for a single column with a name of more than one character. Regression test against #2699""" self._setup(table_types) self.t['new_column'] = self.t['a'] assert 'new_column' in self.t.columns.keys() self.t.remove_columns('new_column') assert 'new_column' not in self.t.columns.keys() def test_remove_nonexistent_row(self, table_types): self._setup(table_types) with pytest.raises(IndexError): self.t.remove_row(4) def test_remove_row_0(self, table_types): self._setup(table_types) self.t.add_column(self.b) self.t.add_column(self.c) self.t.remove_row(0) assert self.t.colnames == ['a', 'b', 'c'] assert np.all(self.t['b'] == np.array([5, 6])) def test_remove_row_1(self, table_types): self._setup(table_types) self.t.add_column(self.b) self.t.add_column(self.c) self.t.remove_row(1) assert self.t.colnames == ['a', 'b', 'c'] assert np.all(self.t['a'] == np.array([1, 3])) def test_remove_row_2(self, table_types): self._setup(table_types) self.t.add_column(self.b) self.t.add_column(self.c) self.t.remove_row(2) assert self.t.colnames == ['a', 'b', 'c'] assert np.all(self.t['c'] == np.array([7, 8])) def test_remove_row_slice(self, table_types): self._setup(table_types) self.t.add_column(self.b) self.t.add_column(self.c) self.t.remove_rows(slice(0, 2, 1)) assert self.t.colnames == ['a', 'b', 'c'] assert np.all(self.t['c'] == np.array([9])) def test_remove_row_list(self, table_types): self._setup(table_types) self.t.add_column(self.b) self.t.add_column(self.c) self.t.remove_rows([0, 2]) assert self.t.colnames == ['a', 'b', 'c'] assert np.all(self.t['c'] == np.array([8])) def test_remove_row_preserves_meta(self, table_types): self._setup(table_types) self.t.add_column(self.b) self.t.remove_rows([0, 2]) assert self.t['a'].meta == {'aa': [0, 1, 2, 3, 4]} assert self.t.dtype == np.dtype([(str('a'), 'int'), (str('b'), 'int')]) def test_delitem_row(self, table_types): self._setup(table_types) self.t.add_column(self.b) self.t.add_column(self.c) del self.t[1] assert self.t.colnames == ['a', 'b', 'c'] assert np.all(self.t['a'] == np.array([1, 3])) @pytest.mark.parametrize("idx", [[0, 2], np.array([0, 2])]) def test_delitem_row_list(self, table_types, idx): self._setup(table_types) self.t.add_column(self.b) self.t.add_column(self.c) del self.t[idx] assert self.t.colnames == ['a', 'b', 'c'] assert np.all(self.t['c'] == np.array([8])) def test_delitem_row_slice(self, table_types): self._setup(table_types) self.t.add_column(self.b) self.t.add_column(self.c) del self.t[0:2] assert self.t.colnames == ['a', 'b', 'c'] assert np.all(self.t['c'] == np.array([9])) def test_delitem_row_fail(self, table_types): self._setup(table_types) with pytest.raises(IndexError): del self.t[4] def test_delitem_row_float(self, table_types): self._setup(table_types) with pytest.raises(IndexError): del self.t[1.] 
def test_delitem1(self, table_types): self._setup(table_types) del self.t['a'] assert self.t.columns.keys() == [] assert self.t.as_array() is None def test_delitem2(self, table_types): self._setup(table_types) del self.t2['b'] assert self.t2.colnames == ['a', 'c'] def test_delitems(self, table_types): self._setup(table_types) del self.t2['a', 'b'] assert self.t2.colnames == ['c'] def test_delitem_fail(self, table_types): self._setup(table_types) with pytest.raises(KeyError): del self.t['d'] @pytest.mark.usefixtures('table_types') class TestKeep(SetupData): def test_1(self, table_types): self._setup(table_types) t = table_types.Table([self.a, self.b]) t.keep_columns([]) assert t.columns.keys() == [] assert t.as_array() is None def test_2(self, table_types): self._setup(table_types) t = table_types.Table([self.a, self.b]) t.keep_columns('b') assert t.columns.keys() == ['b'] assert t.dtype.names == ('b',) assert np.all(t['b'] == np.array([4, 5, 6])) @pytest.mark.usefixtures('table_types') class TestRename(SetupData): def test_1(self, table_types): self._setup(table_types) t = table_types.Table([self.a]) t.rename_column('a', 'b') assert t.columns.keys() == ['b'] assert t.dtype.names == ('b',) assert np.all(t['b'] == np.array([1, 2, 3])) def test_2(self, table_types): self._setup(table_types) t = table_types.Table([self.a, self.b]) t.rename_column('a', 'c') t.rename_column('b', 'a') assert t.columns.keys() == ['c', 'a'] assert t.dtype.names == ('c', 'a') if t.masked: assert t.mask.dtype.names == ('c', 'a') assert np.all(t['c'] == np.array([1, 2, 3])) assert np.all(t['a'] == np.array([4, 5, 6])) def test_rename_by_attr(self, table_types): self._setup(table_types) t = table_types.Table([self.a, self.b]) t['a'].name = 'c' t['b'].name = 'a' assert t.columns.keys() == ['c', 'a'] assert t.dtype.names == ('c', 'a') assert np.all(t['c'] == np.array([1, 2, 3])) assert np.all(t['a'] == np.array([4, 5, 6])) @pytest.mark.usefixtures('table_types') class TestSort(): def test_single(self, table_types): t = table_types.Table() t.add_column(table_types.Column(name='a', data=[2, 1, 3])) t.add_column(table_types.Column(name='b', data=[6, 5, 4])) t.add_column(table_types.Column(name='c', data=[(1, 2), (3, 4), (4, 5)])) assert np.all(t['a'] == np.array([2, 1, 3])) assert np.all(t['b'] == np.array([6, 5, 4])) t.sort('a') assert np.all(t['a'] == np.array([1, 2, 3])) assert np.all(t['b'] == np.array([5, 6, 4])) assert np.all(t['c'] == np.array([[3, 4], [1, 2], [4, 5]])) t.sort('b') assert np.all(t['a'] == np.array([3, 1, 2])) assert np.all(t['b'] == np.array([4, 5, 6])) assert np.all(t['c'] == np.array([[4, 5], [3, 4], [1, 2]])) def test_single_big(self, table_types): """Sort a big-ish table with a non-trivial sort order""" x = np.arange(10000) y = np.sin(x) t = table_types.Table([x, y], names=('x', 'y')) t.sort('y') idx = np.argsort(y) assert np.all(t['x'] == x[idx]) assert np.all(t['y'] == y[idx]) def test_empty(self, table_types): t = table_types.Table([[], []], dtype=['f4', 'U1']) t.sort('col1') def test_multiple(self, table_types): t = table_types.Table() t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1])) t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4])) assert np.all(t['a'] == np.array([2, 1, 3, 2, 3, 1])) assert np.all(t['b'] == np.array([6, 5, 4, 3, 5, 4])) t.sort(['a', 'b']) assert np.all(t['a'] == np.array([1, 1, 2, 2, 3, 3])) assert np.all(t['b'] == np.array([4, 5, 3, 6, 4, 5])) t.sort(['b', 'a']) assert np.all(t['a'] == np.array([2, 1, 3, 1, 3, 2])) assert np.all(t['b'] 
== np.array([3, 4, 4, 5, 5, 6])) t.sort(('a', 'b')) assert np.all(t['a'] == np.array([1, 1, 2, 2, 3, 3])) assert np.all(t['b'] == np.array([4, 5, 3, 6, 4, 5])) def test_multiple_with_bytes(self, table_types): t = table_types.Table() t.add_column(table_types.Column(name='firstname', data=[b"Max", b"Jo", b"John"])) t.add_column(table_types.Column(name='name', data=[b"Miller", b"Miller", b"Jackson"])) t.add_column(table_types.Column(name='tel', data=[12, 15, 19])) t.sort(['name', 'firstname']) assert np.all([t['firstname'] == np.array([b"John", b"Jo", b"Max"])]) assert np.all([t['name'] == np.array([b"Jackson", b"Miller", b"Miller"])]) assert np.all([t['tel'] == np.array([19, 15, 12])]) def test_multiple_with_unicode(self, table_types): # Before Numpy 1.6.2, sorting with multiple column names # failed when a unicode column was present. t = table_types.Table() t.add_column(table_types.Column( name='firstname', data=[str(x) for x in ["Max", "Jo", "John"]])) t.add_column(table_types.Column( name='name', data=[str(x) for x in ["Miller", "Miller", "Jackson"]])) t.add_column(table_types.Column(name='tel', data=[12, 15, 19])) t.sort(['name', 'firstname']) assert np.all([t['firstname'] == np.array( [str(x) for x in ["John", "Jo", "Max"]])]) assert np.all([t['name'] == np.array( [str(x) for x in ["Jackson", "Miller", "Miller"]])]) assert np.all([t['tel'] == np.array([19, 15, 12])]) def test_argsort(self, table_types): t = table_types.Table() t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1])) t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4])) assert np.all(t.argsort() == t.as_array().argsort()) i0 = t.argsort('a') i1 = t.as_array().argsort(order=['a']) assert np.all(t['a'][i0] == t['a'][i1]) i0 = t.argsort(['a', 'b']) i1 = t.as_array().argsort(order=['a', 'b']) assert np.all(t['a'][i0] == t['a'][i1]) assert np.all(t['b'][i0] == t['b'][i1]) def test_argsort_bytes(self, table_types): t = table_types.Table() t.add_column(table_types.Column(name='firstname', data=[b"Max", b"Jo", b"John"])) t.add_column(table_types.Column(name='name', data=[b"Miller", b"Miller", b"Jackson"])) t.add_column(table_types.Column(name='tel', data=[12, 15, 19])) assert np.all(t.argsort(['name', 'firstname']) == np.array([2, 1, 0])) def test_argsort_unicode(self, table_types): # Before Numpy 1.6.2, sorting with multiple column names # failed when a unicode column was present. t = table_types.Table() t.add_column(table_types.Column( name='firstname', data=[str(x) for x in ["Max", "Jo", "John"]])) t.add_column(table_types.Column( name='name', data=[str(x) for x in ["Miller", "Miller", "Jackson"]])) t.add_column(table_types.Column(name='tel', data=[12, 15, 19])) assert np.all(t.argsort(['name', 'firstname']) == np.array([2, 1, 0])) def test_rebuild_column_view_then_rename(self, table_types): """ Issue #2039 where renaming fails after any method that calls _rebuild_table_column_view (this includes sort and add_row). 
""" t = table_types.Table([[1]], names=('a',)) assert t.colnames == ['a'] assert t.dtype.names == ('a',) t.add_row((2,)) assert t.colnames == ['a'] assert t.dtype.names == ('a',) t.rename_column('a', 'b') assert t.colnames == ['b'] assert t.dtype.names == ('b',) t.sort('b') assert t.colnames == ['b'] assert t.dtype.names == ('b',) t.rename_column('b', 'c') assert t.colnames == ['c'] assert t.dtype.names == ('c',) @pytest.mark.usefixtures('table_types') class TestIterator(): def test_iterator(self, table_types): d = np.array([(2, 1), (3, 6), (4, 5)], dtype=[(str('a'), 'i4'), (str('b'), 'i4')]) t = table_types.Table(d) if t.masked: with pytest.raises(ValueError): t[0] == d[0] else: for row, np_row in zip(t, d): assert np.all(row == np_row) @pytest.mark.usefixtures('table_types') class TestSetMeta(): def test_set_meta(self, table_types): d = table_types.Table(names=('a', 'b')) d.meta['a'] = 1 d.meta['b'] = 1 d.meta['c'] = 1 d.meta['d'] = 1 assert list(d.meta.keys()) == ['a', 'b', 'c', 'd'] @pytest.mark.usefixtures('table_types') class TestConvertNumpyArray(): def test_convert_numpy_array(self, table_types): d = table_types.Table([[1, 2], [3, 4]], names=('a', 'b')) np_data = np.array(d) if table_types.Table is not MaskedTable: assert np.all(np_data == d.as_array()) assert np_data is not d.as_array() assert d.colnames == list(np_data.dtype.names) np_data = np.array(d, copy=False) if table_types.Table is not MaskedTable: assert np.all(np_data == d.as_array()) assert d.colnames == list(np_data.dtype.names) with pytest.raises(ValueError): np_data = np.array(d, dtype=[(str('c'), 'i8'), (str('d'), 'i8')]) def test_as_array_byteswap(self, table_types): """Test for https://github.com/astropy/astropy/pull/4080""" byte_orders = ('>', '<') native_order = byte_orders[sys.byteorder == 'little'] for order in byte_orders: col = table_types.Column([1.0, 2.0], name='a', dtype=order + 'f8') t = table_types.Table([col]) arr = t.as_array() assert arr['a'].dtype.byteorder in (native_order, '=') arr = t.as_array(keep_byteorder=True) if order == native_order: assert arr['a'].dtype.byteorder in (order, '=') else: assert arr['a'].dtype.byteorder == order def test_byteswap_fits_array(self, table_types): """ Test for https://github.com/astropy/astropy/pull/4080, demonstrating that FITS tables are converted to native byte order. 
""" non_native_order = ('>', '<')[sys.byteorder != 'little'] filename = get_pkg_data_filename('data/tb.fits', 'astropy.io.fits.tests') t = table_types.Table.read(filename) arr = t.as_array() for idx in range(len(arr.dtype)): assert arr.dtype[idx].byteorder != non_native_order with fits.open(filename, character_as_bytes=True) as hdul: data = hdul[1].data for colname in data.columns.names: assert np.all(data[colname] == arr[colname]) arr2 = t.as_array(keep_byteorder=True) for colname in data.columns.names: assert (data[colname].dtype.byteorder == arr2[colname].dtype.byteorder) def _assert_copies(t, t2, deep=True): assert t.colnames == t2.colnames np.testing.assert_array_equal(t.as_array(), t2.as_array()) assert t.meta == t2.meta for col, col2 in zip(t.columns.values(), t2.columns.values()): if deep: assert not np.may_share_memory(col, col2) else: assert np.may_share_memory(col, col2) def test_copy(): t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y']) t2 = t.copy() _assert_copies(t, t2) def test_copy_masked(): t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y'], masked=True, meta={'name': 'test'}) t['x'].mask == [True, False, True] t2 = t.copy() _assert_copies(t, t2) def test_copy_protocol(): t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y']) t2 = copy.copy(t) t3 = copy.deepcopy(t) _assert_copies(t, t2, deep=False) _assert_copies(t, t3) def test_disallow_inequality_comparisons(): """ Regression test for #828 - disallow comparison operators on whole Table """ t = table.Table() with pytest.raises(TypeError): t > 2 with pytest.raises(TypeError): t < 1.1 with pytest.raises(TypeError): t >= 5.5 with pytest.raises(TypeError): t <= -1.1 def test_equality(): t = table.Table.read([' a b c d', ' 2 c 7.0 0', ' 2 b 5.0 1', ' 2 b 6.0 2', ' 2 a 4.0 3', ' 0 a 0.0 4', ' 1 b 3.0 5', ' 1 a 2.0 6', ' 1 a 1.0 7', ], format='ascii') # All rows are equal assert np.all(t == t) # Assert no rows are different assert not np.any(t != t) # Check equality result for a given row assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool)) # Check inequality result for a given row assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool)) t2 = table.Table.read([' a b c d', ' 2 c 7.0 0', ' 2 b 5.0 1', ' 3 b 6.0 2', ' 2 a 4.0 3', ' 0 a 1.0 4', ' 1 b 3.0 5', ' 1 c 2.0 6', ' 1 a 1.0 7', ], format='ascii') # In the above cases, Row.__eq__ gets called, but now need to make sure # Table.__eq__ also gets called. 
assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool)) assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool)) # Check that comparing to a structured array works assert np.all((t == t2.as_array()) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool)) assert np.all((t.as_array() == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool)) def test_equality_masked(): t = table.Table.read([' a b c d', ' 2 c 7.0 0', ' 2 b 5.0 1', ' 2 b 6.0 2', ' 2 a 4.0 3', ' 0 a 0.0 4', ' 1 b 3.0 5', ' 1 a 2.0 6', ' 1 a 1.0 7', ], format='ascii') # Make into masked table t = table.Table(t, masked=True) # All rows are equal assert np.all(t == t) # Assert no rows are different assert not np.any(t != t) # Check equality result for a given row assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool)) # Check inequality result for a given row assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool)) t2 = table.Table.read([' a b c d', ' 2 c 7.0 0', ' 2 b 5.0 1', ' 3 b 6.0 2', ' 2 a 4.0 3', ' 0 a 1.0 4', ' 1 b 3.0 5', ' 1 c 2.0 6', ' 1 a 1.0 7', ], format='ascii') # In the above cases, Row.__eq__ gets called, but now need to make sure # Table.__eq__ also gets called. assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool)) assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool)) # Check that masking a value causes the row to differ t.mask['a'][0] = True assert np.all((t == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool)) assert np.all((t != t2) == np.array([1, 0, 1, 0, 1, 0, 1, 0], dtype=bool)) # Check that comparing to a structured array works assert np.all((t == t2.as_array()) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool)) @pytest.mark.xfail def test_equality_masked_bug(): """ This highlights a Numpy bug. Once it works, it can be moved into the test_equality_masked test. Related Numpy bug report: https://github.com/numpy/numpy/issues/3840 """ t = table.Table.read([' a b c d', ' 2 c 7.0 0', ' 2 b 5.0 1', ' 2 b 6.0 2', ' 2 a 4.0 3', ' 0 a 0.0 4', ' 1 b 3.0 5', ' 1 a 2.0 6', ' 1 a 1.0 7', ], format='ascii') t = table.Table(t, masked=True) t2 = table.Table.read([' a b c d', ' 2 c 7.0 0', ' 2 b 5.0 1', ' 3 b 6.0 2', ' 2 a 4.0 3', ' 0 a 1.0 4', ' 1 b 3.0 5', ' 1 c 2.0 6', ' 1 a 1.0 7', ], format='ascii') assert np.all((t.as_array() == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool)) # Check that the meta descriptor is working as expected. The MetaBaseTest class # takes care of defining all the tests, and we simply have to define the class # and any minimal set of args to pass. 
from ...utils.tests.test_metadata import MetaBaseTest class TestMetaTable(MetaBaseTest): test_class = table.Table args = () def test_unicode_content(): # If we don't have unicode literals then return if isinstance('', bytes): return # Define unicode literals string_a = 'астрономическая питона' string_b = 'миллиарды световых лет' a = table.Table( [[string_a, 2], [string_b, 3]], names=('a', 'b')) assert string_a in str(a) # This only works because the coding of this file is utf-8, which # matches the default encoding of Table.__str__ assert string_a.encode('utf-8') in bytes(a) def test_unicode_policy(): t = table.Table.read([' a b c d', ' 2 c 7.0 0', ' 2 b 5.0 1', ' 2 b 6.0 2', ' 2 a 4.0 3', ' 0 a 0.0 4', ' 1 b 3.0 5', ' 1 a 2.0 6', ' 1 a 1.0 7', ], format='ascii') assert_follows_unicode_guidelines(t) def test_unicode_bytestring_conversion(table_types): t = table_types.Table([['abc'], ['def'], [1]], dtype=('S', 'U', 'i')) assert t['col0'].dtype.kind == 'S' assert t['col1'].dtype.kind == 'U' assert t['col2'].dtype.kind == 'i' t1 = t.copy() t1.convert_unicode_to_bytestring() assert t1['col0'].dtype.kind == 'S' assert t1['col1'].dtype.kind == 'S' assert t1['col2'].dtype.kind == 'i' assert t1['col0'][0] == 'abc' assert t1['col1'][0] == 'def' assert t1['col2'][0] == 1 t1 = t.copy() t1.convert_bytestring_to_unicode() assert t1['col0'].dtype.kind == 'U' assert t1['col1'].dtype.kind == 'U' assert t1['col2'].dtype.kind == 'i' assert t1['col0'][0] == str('abc') assert t1['col1'][0] == str('def') assert t1['col2'][0] == 1 def test_table_deletion(): """ Regression test for the reference cycle discussed in https://github.com/astropy/astropy/issues/2877 """ deleted = set() # A special table subclass which leaves a record when it is finalized class TestTable(table.Table): def __del__(self): deleted.add(id(self)) t = TestTable({'a': [1, 2, 3]}) the_id = id(t) assert t['a'].parent_table is t del t # Cleanup gc.collect() assert the_id in deleted def test_nested_iteration(): """ Regression test for issue 3358 where nested iteration over a single table fails. """ t = table.Table([[0, 1]], names=['a']) out = [] for r1 in t: for r2 in t: out.append((r1['a'], r2['a'])) assert out == [(0, 0), (0, 1), (1, 0), (1, 1)] def test_table_init_from_degenerate_arrays(table_types): t = table_types.Table(np.array([])) assert len(t.columns) == 0 with pytest.raises(ValueError): t = table_types.Table(np.array(0)) t = table_types.Table(np.array([1, 2, 3])) assert len(t.columns) == 3 @pytest.mark.skipif('not HAS_PANDAS') class TestPandas: def test_simple(self): t = table.Table() for endian in ['<', '>']: for kind in ['f', 'i']: for byte in ['2', '4', '8']: dtype = np.dtype(endian + kind + byte) x = np.array([1, 2, 3], dtype=dtype) t[endian + kind + byte] = x t['u'] = ['a', 'b', 'c'] t['s'] = ['a', 'b', 'c'] d = t.to_pandas() for column in t.columns: if column == 'u': assert np.all(t['u'] == np.array(['a', 'b', 'c'])) assert d[column].dtype == np.dtype("O") # upstream feature of pandas elif column == 's': assert np.all(t['s'] == np.array(['a', 'b', 'c'])) assert d[column].dtype == np.dtype("O") # upstream feature of pandas else: # We should be able to compare exact values here assert np.all(t[column] == d[column]) if t[column].dtype.byteorder in ('=', '|'): assert d[column].dtype == t[column].dtype else: assert d[column].dtype == t[column].byteswap().newbyteorder().dtype # Regression test for astropy/astropy#1156 - the following code gave a # ValueError: Big-endian buffer not supported on little-endian # compiler. 
We now automatically swap the endian-ness to native order # upon adding the arrays to the data frame. d[['<i4', '>i4']] d[['<f4', '>f4']] t2 = table.Table.from_pandas(d) for column in t.columns: if column in ('u', 's'): assert np.all(t[column] == t2[column]) else: assert_allclose(t[column], t2[column]) if t[column].dtype.byteorder in ('=', '|'): assert t[column].dtype == t2[column].dtype else: assert t[column].byteswap().newbyteorder().dtype == t2[column].dtype def test_2d(self): t = table.Table() t['a'] = [1, 2, 3] t['b'] = np.ones((3, 2)) with pytest.raises(ValueError) as exc: t.to_pandas() assert exc.value.args[0] == "Cannot convert a table with multi-dimensional columns to a pandas DataFrame" def test_mixin(self): from ...coordinates import SkyCoord t = table.Table() t['c'] = SkyCoord([1, 2, 3], [4, 5, 6], unit='deg') with pytest.raises(ValueError) as exc: t.to_pandas() assert exc.value.args[0] == "Cannot convert a table with mixin columns to a pandas DataFrame" def test_masking(self): t = table.Table(masked=True) t['a'] = [1, 2, 3] t['a'].mask = [True, False, True] t['b'] = [1., 2., 3.] t['b'].mask = [False, False, True] t['u'] = ['a', 'b', 'c'] t['u'].mask = [False, True, False] t['s'] = ['a', 'b', 'c'] t['s'].mask = [False, True, False] d = t.to_pandas() t2 = table.Table.from_pandas(d) for name, column in t.columns.items(): assert np.all(column.data == t2[name].data) assert np.all(column.mask == t2[name].mask) # Masked integer type comes back as float. Nothing we can do about this. if column.dtype.kind == 'i': assert t2[name].dtype.kind == 'f' else: if column.dtype.byteorder in ('=', '|'): assert column.dtype == t2[name].dtype else: assert column.byteswap().newbyteorder().dtype == t2[name].dtype @pytest.mark.usefixtures('table_types') class TestReplaceColumn(SetupData): def test_fail_replace_column(self, table_types): """Raise exception when trying to replace column via table.columns object""" self._setup(table_types) t = table_types.Table([self.a, self.b]) with pytest.raises(ValueError): t.columns['a'] = [1, 2, 3] with pytest.raises(ValueError): t.replace_column('not there', [1, 2, 3]) def test_replace_column(self, table_types): """Replace existing column with a new column""" self._setup(table_types) t = table_types.Table([self.a, self.b]) ta = t['a'] tb = t['b'] vals = [1.2, 3.4, 5.6] for col in (vals, table_types.Column(vals), table_types.Column(vals, name='a'), table_types.Column(vals, name='b')): t.replace_column('a', col) assert np.all(t['a'] == vals) assert t['a'] is not ta # New a column assert t['b'] is tb # Original b column unchanged assert t.colnames == ['a', 'b'] assert t['a'].meta == {} assert t['a'].format is None def test_replace_index_column(self, table_types): """Replace index column and generate expected exception""" self._setup(table_types) t = table_types.Table([self.a, self.b]) t.add_index('a') with pytest.raises(ValueError) as err: t.replace_column('a', [1, 2, 3]) assert err.value.args[0] == 'cannot replace a table index column' class Test__Astropy_Table__(): """ Test initializing a Table subclass from a table-like object that implements the __astropy_table__ interface method. 
""" class SimpleTable: def __init__(self): self.columns = [[1, 2, 3], [4, 5, 6], [7, 8, 9] * u.m] self.names = ['a', 'b', 'c'] self.meta = OrderedDict([('a', 1), ('b', 2)]) def __astropy_table__(self, cls, copy, **kwargs): a, b, c = self.columns c.info.name = 'c' cols = [table.Column(a, name='a'), table.MaskedColumn(b, name='b'), c] names = [col.info.name for col in cols] return cls(cols, names=names, copy=copy, meta=kwargs or self.meta) def test_simple_1(self): """Make a SimpleTable and convert to Table, QTable with copy=False, True""" for table_cls in (table.Table, table.QTable): col_c_class = u.Quantity if table_cls is table.QTable else table.MaskedColumn for cpy in (False, True): st = self.SimpleTable() # Test putting in a non-native kwarg `extra_meta` to Table initializer t = table_cls(st, copy=cpy, extra_meta='extra!') assert t.colnames == ['a', 'b', 'c'] assert t.meta == {'extra_meta': 'extra!'} assert np.all(t['a'] == st.columns[0]) assert np.all(t['b'] == st.columns[1]) vals = t['c'].value if table_cls is table.QTable else t['c'] assert np.all(st.columns[2].value == vals) assert isinstance(t['a'], table.MaskedColumn) assert isinstance(t['b'], table.MaskedColumn) assert isinstance(t['c'], col_c_class) assert t['c'].unit is u.m assert type(t) is table_cls # Copy being respected? t['a'][0] = 10 assert st.columns[0][0] == 1 if cpy else 10 def test_simple_2(self): """Test converting a SimpleTable and changing column names and types""" st = self.SimpleTable() dtypes = [np.int32, np.float32, np.float16] names = ['a', 'b', 'c'] t = table.Table(st, dtype=dtypes, names=names, meta=OrderedDict([('c', 3)])) assert t.colnames == names assert all(col.dtype.type is dtype for col, dtype in zip(t.columns.values(), dtypes)) # The supplied meta is ignored. This is consistent with current # behavior when initializing from an existing astropy Table. assert t.meta == st.meta def test_kwargs_exception(self): """If extra kwargs provided but without initializing with a table-like object, exception is raised""" with pytest.raises(TypeError) as err: table.Table([[1]], extra_meta='extra!') assert '__init__() got unexpected keyword argument' in str(err) def test_replace_column_qtable(): """Replace existing Quantity column with a new column in a QTable""" a = [1, 2, 3] * u.m b = [4, 5, 6] t = table.QTable([a, b], names=['a', 'b']) ta = t['a'] tb = t['b'] ta.info.meta = {'aa': [0, 1, 2, 3, 4]} ta.info.format = '%f' t.replace_column('a', a.to('cm')) assert np.all(t['a'] == ta) assert t['a'] is not ta # New a column assert t['b'] is tb # Original b column unchanged assert t.colnames == ['a', 'b'] assert t['a'].info.meta is None assert t['a'].info.format is None def test_replace_update_column_via_setitem(): """ Test table update like ``t['a'] = value``. This leverages off the already well-tested ``replace_column`` and in-place update ``t['a'][:] = value``, so this testing is fairly light. 
""" a = [1, 2] * u.m b = [3, 4] t = table.QTable([a, b], names=['a', 'b']) assert isinstance(t['a'], u.Quantity) # Inplace update ta = t['a'] t['a'] = 5 * u.m assert np.all(t['a'] == [5, 5] * u.m) assert t['a'] is ta # Replace t['a'] = [5, 6] assert np.all(t['a'] == [5, 6]) assert isinstance(t['a'], table.Column) assert t['a'] is not ta def test_replace_update_column_via_setitem_warnings_normal(): """ Test warnings related to table replace change in #5556: Normal warning-free replace """ t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b']) with catch_warnings() as w: with table.conf.set_temp('replace_warnings', ['refcount', 'attributes', 'slice']): t['a'] = 0 # in-place update assert len(w) == 0 t['a'] = [10, 20, 30] # replace column assert len(w) == 0 def test_replace_update_column_via_setitem_warnings_slice(): """ Test warnings related to table replace change in #5556: Replace a slice, one warning. """ t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b']) with catch_warnings() as w: with table.conf.set_temp('replace_warnings', ['refcount', 'attributes', 'slice']): t2 = t[:2] t2['a'] = 0 # in-place slice update assert np.all(t['a'] == [0, 0, 3]) assert len(w) == 0 t2['a'] = [10, 20] # replace slice assert len(w) == 1 assert "replaced column 'a' which looks like an array slice" in str(w[0].message) def test_replace_update_column_via_setitem_warnings_attributes(): """ Test warnings related to table replace change in #5556: Lost attributes. """ t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b']) t['a'].unit = 'm' with catch_warnings() as w: with table.conf.set_temp('replace_warnings', ['refcount', 'attributes', 'slice']): t['a'] = [10, 20, 30] assert len(w) == 1 assert "replaced column 'a' and column attributes ['unit']" in str(w[0].message) def test_replace_update_column_via_setitem_warnings_refcount(): """ Test warnings related to table replace change in #5556: Reference count changes. """ t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b']) ta = t['a'] # Generate an extra reference to original column with catch_warnings() as w: with table.conf.set_temp('replace_warnings', ['refcount', 'attributes', 'slice']): t['a'] = [10, 20, 30] assert len(w) == 1 assert "replaced column 'a' and the number of references" in str(w[0].message) def test_replace_update_column_via_setitem_warnings_always(): """ Test warnings related to table replace change in #5556: Test 'always' setting that raises warning for any replace. """ t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b']) with catch_warnings() as w: with table.conf.set_temp('replace_warnings', ['always']): t['a'] = 0 # in-place slice update assert len(w) == 0 from inspect import currentframe, getframeinfo frameinfo = getframeinfo(currentframe()) t['a'] = [10, 20, 30] # replace column assert len(w) == 1 assert "replaced column 'a'" == str(w[0].message) # Make sure the warning points back to the user code line assert w[0].lineno == frameinfo.lineno + 1 assert w[0].category is table.TableReplaceWarning assert 'test_table' in w[0].filename def test_replace_update_column_via_setitem_replace_inplace(): """ Test the replace_inplace config option related to #5556. In this case no replace is done. 
""" t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b']) ta = t['a'] t['a'].unit = 'm' with catch_warnings() as w: with table.conf.set_temp('replace_inplace', True): with table.conf.set_temp('replace_warnings', ['always', 'refcount', 'attributes', 'slice']): t['a'] = 0 # in-place update assert len(w) == 0 assert ta is t['a'] t['a'] = [10, 20, 30] # normally replaces column, but not now assert len(w) == 0 assert ta is t['a'] assert np.all(t['a'] == [10, 20, 30]) def test_primary_key_is_inherited(): """Test whether a new Table inherits the primary_key attribute from its parent Table. Issue #4672""" t = table.Table([(2, 3, 2, 1), (8, 7, 6, 5)], names=('a', 'b')) t.add_index('a') original_key = t.primary_key # can't test if tuples are equal, so just check content assert original_key[0] is 'a' t2 = t[:] t3 = t.copy() t4 = table.Table(t) # test whether the reference is the same in the following assert original_key == t2.primary_key assert original_key == t3.primary_key assert original_key == t4.primary_key # just test one element, assume rest are equal if assert passes assert t.loc[1] == t2.loc[1] assert t.loc[1] == t3.loc[1] assert t.loc[1] == t4.loc[1] def test_qtable_read_for_ipac_table_with_char_columns(): '''Test that a char column of a QTable is assigned no unit and not a dimensionless unit, otherwise conversion of reader output to QTable fails.''' t1 = table.QTable([["A"]], names="B") out = StringIO() t1.write(out, format="ascii.ipac") t2 = table.QTable.read(out.getvalue(), format="ascii.ipac", guess=False) assert t2["B"].unit is None
c9e8c05bb563366f68c77aa283cdc7f4036469d2481478e88bac8db222e6519b
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst import warnings from io import StringIO from collections import OrderedDict import numpy as np from ... import units as u from ... import time from ... import coordinates from ... import table from ...utils.data_info import data_info_factory, dtype_info_name from ..table_helpers import simple_table def test_table_info_attributes(table_types): """ Test the info() method of printing a summary of table column attributes """ a = np.array([1, 2, 3], dtype='int32') b = np.array([1, 2, 3], dtype='float32') c = np.array(['a', 'c', 'e'], dtype='|S1') t = table_types.Table([a, b, c], names=['a', 'b', 'c']) # Minimal output for a typical table tinfo = t.info(out=None) subcls = ['class'] if table_types.Table.__name__ == 'MyTable' else [] assert tinfo.colnames == ['name', 'dtype', 'shape', 'unit', 'format', 'description', 'class', 'n_bad', 'length'] assert np.all(tinfo['name'] == ['a', 'b', 'c']) assert np.all(tinfo['dtype'] == ['int32', 'float32', dtype_info_name('S1')]) if subcls: assert np.all(tinfo['class'] == ['MyColumn'] * 3) # All output fields including a mixin column t['d'] = [1, 2, 3] * u.m t['d'].description = 'quantity' t['a'].format = '%02d' t['e'] = time.Time([1, 2, 3], format='mjd') t['e'].info.description = 'time' t['f'] = coordinates.SkyCoord([1, 2, 3], [1, 2, 3], unit='deg') t['f'].info.description = 'skycoord' tinfo = t.info(out=None) assert np.all(tinfo['name'] == 'a b c d e f'.split()) assert np.all(tinfo['dtype'] == ['int32', 'float32', dtype_info_name('S1'), 'float64', 'object', 'object']) assert np.all(tinfo['unit'] == ['', '', '', 'm', '', 'deg,deg']) assert np.all(tinfo['format'] == ['%02d', '', '', '', '', '']) assert np.all(tinfo['description'] == ['', '', '', 'quantity', 'time', 'skycoord']) cls = t.ColumnClass.__name__ assert np.all(tinfo['class'] == [cls, cls, cls, cls, 'Time', 'SkyCoord']) # Test that repr(t.info) is same as t.info() out = StringIO() t.info(out=out) assert repr(t.info) == out.getvalue() def test_table_info_stats(table_types): """ Test the info() method of printing a summary of table column statistics """ a = np.array([1, 2, 1, 2], dtype='int32') b = np.array([1, 2, 1, 2], dtype='float32') c = np.array(['a', 'c', 'e', 'f'], dtype='|S1') d = time.Time([1, 2, 1, 2], format='mjd') t = table_types.Table([a, b, c, d], names=['a', 'b', 'c', 'd']) # option = 'stats' masked = 'masked=True ' if t.masked else '' out = StringIO() t.info('stats', out=out) table_header_line = '<{0} {1}length=4>'.format(t.__class__.__name__, masked) exp = [table_header_line, 'name mean std min max', '---- ---- --- --- ---', ' a 1.5 0.5 1 2', ' b 1.5 0.5 1.0 2.0', ' c -- -- -- --', ' d -- -- 1.0 2.0'] assert out.getvalue().splitlines() == exp # option = ['attributes', 'stats'] tinfo = t.info(['attributes', 'stats'], out=None) assert tinfo.colnames == ['name', 'dtype', 'shape', 'unit', 'format', 'description', 'class', 'mean', 'std', 'min', 'max', 'n_bad', 'length'] assert np.all(tinfo['mean'] == ['1.5', '1.5', '--', '--']) assert np.all(tinfo['std'] == ['0.5', '0.5', '--', '--']) assert np.all(tinfo['min'] == ['1', '1.0', '--', '1.0']) assert np.all(tinfo['max'] == ['2', '2.0', '--', '2.0']) out = StringIO() t.info('stats', out=out) exp = [table_header_line, 'name mean std min max', '---- ---- --- --- ---', ' a 1.5 0.5 1 2', ' b 1.5 0.5 1.0 2.0', ' c -- -- -- --', ' d -- -- 1.0 2.0'] assert out.getvalue().splitlines() == exp # option = ['attributes', custom] custom = data_info_factory(names=['sum', 
'first'], funcs=[np.sum, lambda col: col[0]]) out = StringIO() tinfo = t.info(['attributes', custom], out=None) assert tinfo.colnames == ['name', 'dtype', 'shape', 'unit', 'format', 'description', 'class', 'sum', 'first', 'n_bad', 'length'] assert np.all(tinfo['name'] == ['a', 'b', 'c', 'd']) assert np.all(tinfo['dtype'] == ['int32', 'float32', dtype_info_name('S1'), 'object']) assert np.all(tinfo['sum'] == ['6', '6.0', '--', '--']) assert np.all(tinfo['first'] == ['1', '1.0', 'a', '1.0']) def test_data_info(): """ Test getting info for just a column. """ cols = [table.Column([1.0, 2.0, np.nan], name='name', description='description', unit='m/s'), table.MaskedColumn([1.0, 2.0, 3.0], name='name', description='description', unit='m/s', mask=[False, False, True])] for c in cols: # Test getting the full ordered dict cinfo = c.info(out=None) assert cinfo == OrderedDict([('name', 'name'), ('dtype', 'float64'), ('shape', ''), ('unit', 'm / s'), ('format', ''), ('description', 'description'), ('class', type(c).__name__), ('n_bad', 1), ('length', 3)]) # Test the console (string) version which omits trivial values out = StringIO() c.info(out=out) exp = ['name = name', 'dtype = float64', 'unit = m / s', 'description = description', 'class = {0}'.format(type(c).__name__), 'n_bad = 1', 'length = 3'] assert out.getvalue().splitlines() == exp # repr(c.info) gives the same as c.info() assert repr(c.info) == out.getvalue() # Test stats info cinfo = c.info('stats', out=None) assert cinfo == OrderedDict([('name', 'name'), ('mean', '1.5'), ('std', '0.5'), ('min', '1.0'), ('max', '2.0'), ('n_bad', 1), ('length', 3)]) def test_data_info_subclass(): class Column(table.Column): """ Confusingly named Column on purpose, but that is legal. """ pass for data in ([], [1, 2]): c = Column(data, dtype='int64') cinfo = c.info(out=None) assert cinfo == OrderedDict([('dtype', 'int64'), ('shape', ''), ('unit', ''), ('format', ''), ('description', ''), ('class', 'Column'), ('n_bad', 0), ('length', len(data))]) def test_scalar_info(): """ Make sure info works with scalar values """ c = time.Time('2000:001') cinfo = c.info(out=None) assert cinfo['n_bad'] == 0 assert 'length' not in cinfo def test_empty_table(): t = table.Table() out = StringIO() t.info(out=out) exp = ['<Table length=0>', '<No columns>'] assert out.getvalue().splitlines() == exp def test_class_attribute(): """ Test that class info column is suppressed only for identical non-mixin columns. """ vals = [[1] * u.m, [2] * u.m] texp = ['<Table length=1>', 'name dtype unit', '---- ------- ----', 'col0 float64 m', 'col1 float64 m'] qexp = ['<QTable length=1>', 'name dtype unit class ', '---- ------- ---- --------', 'col0 float64 m Quantity', 'col1 float64 m Quantity'] for table_cls, exp in ((table.Table, texp), (table.QTable, qexp)): t = table_cls(vals) out = StringIO() t.info(out=out) assert out.getvalue().splitlines() == exp def test_ignore_warnings(): t = table.Table([[np.nan, np.nan]]) with warnings.catch_warnings(record=True) as warns: t.info('stats', out=None) assert len(warns) == 0 def test_no_deprecation_warning(): # regression test for #5459, where numpy deprecation warnings were # emitted unnecessarily. t = simple_table() with warnings.catch_warnings(record=True) as warns: t.info() assert len(warns) == 0
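

# A minimal sketch of building a custom ``info`` option with
# ``data_info_factory``, as exercised in test_table_info_stats above.  The
# option name 'range' and the accompanying function are arbitrary choices.
def test_custom_info_option_sketch():
    t = table.Table({'a': [1.0, 2.0, 3.0]})
    rng = data_info_factory(names=['range'],
                            funcs=[lambda col: np.max(col) - np.min(col)])
    tinfo = t.info(['attributes', rng], out=None)
    assert 'range' in tinfo.colnames
    # info values are stringified, as in the 'sum'/'first' example above
    assert tinfo['range'][0] == '2.0'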
01827df83e0ae065132cc5b2b95721451331b1019ec46be4a5b4f6a8809d0a53
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest import numpy as np from .test_table import SetupData from ..bst import BST, FastRBT, FastBST from ..sorted_array import SortedArray from ..table import QTable, Row, Table from ... import units as u from ...time import Time from ..column import BaseColumn try: import bintrees except ImportError: HAS_BINTREES = False else: HAS_BINTREES = True if HAS_BINTREES: available_engines = [BST, FastBST, FastRBT, SortedArray] else: available_engines = [BST, SortedArray] @pytest.fixture(params=available_engines) def engine(request): return request.param _col = [1, 2, 3, 4, 5] @pytest.fixture(params=[ _col, u.Quantity(_col), Time(_col, format='jyear'), ]) def main_col(request): return request.param def assert_col_equal(col, array): if isinstance(col, Time): assert np.all(col == Time(array, format='jyear')) else: assert np.all(col == col.__class__(array)) @pytest.mark.usefixtures('table_types') class TestIndex(SetupData): def _setup(self, main_col, table_types): super()._setup(table_types) self.main_col = main_col if isinstance(main_col, u.Quantity): self._table_type = QTable if not isinstance(main_col, list): self._column_type = lambda x: x # don't change mixin type self.mutable = isinstance(main_col, (list, u.Quantity)) def make_col(self, name, lst): return self._column_type(lst, name=name) def make_val(self, val): if isinstance(self.main_col, Time): return Time(val, format='jyear') return val @property def t(self): if not hasattr(self, '_t'): self._t = self._table_type() self._t['a'] = self._column_type(self.main_col) self._t['b'] = self._column_type([4.0, 5.1, 6.2, 7.0, 1.1]) self._t['c'] = self._column_type(['7', '8', '9', '10', '11']) return self._t @pytest.mark.parametrize("composite", [False, True]) def test_table_index(self, main_col, table_types, composite, engine): self._setup(main_col, table_types) t = self.t t.add_index(('a', 'b') if composite else 'a', engine=engine) assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4]) if not self.mutable: return # test altering table columns t['a'][0] = 4 t.add_row((6, 6.0, '7')) t['a'][3] = 10 t.remove_row(2) t.add_row((4, 5.0, '9')) assert_col_equal(t['a'], np.array([4, 2, 10, 5, 6, 4])) assert np.allclose(t['b'], np.array([4.0, 5.1, 7.0, 1.1, 6.0, 5.0])) assert np.all(t['c'].data == np.array(['7', '8', '10', '11', '7', '9'])) index = t.indices[0] l = list(index.data.items()) if composite: assert np.all(l == [((2, 5.1), [1]), ((4, 4.0), [0]), ((4, 5.0), [5]), ((5, 1.1), [3]), ((6, 6.0), [4]), ((10, 7.0), [2])]) else: assert np.all(l == [((2,), [1]), ((4,), [0, 5]), ((5,), [3]), ((6,), [4]), ((10,), [2])]) t.remove_indices('a') assert len(t.indices) == 0 def test_table_slicing(self, main_col, table_types, engine): self._setup(main_col, table_types) t = self.t t.add_index('a', engine=engine) assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4]) for slice_ in ([0, 2], np.array([0, 2])): t2 = t[slice_] # t2 should retain an index on column 'a' assert len(t2.indices) == 1 assert_col_equal(t2['a'], [1, 3]) # the index in t2 should reorder row numbers after slicing assert np.all(t2.indices[0].sorted_data() == [0, 1]) # however, this index should be a deep copy of t1's index assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4]) def test_remove_rows(self, main_col, table_types, engine): self._setup(main_col, table_types) if not self.mutable: return t = self.t t.add_index('a', engine=engine) # remove individual row t2 = t.copy() t2.remove_rows(2) 
assert_col_equal(t2['a'], [1, 2, 4, 5]) assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3]) # remove by list, ndarray, or slice for cut in ([0, 2, 4], np.array([0, 2, 4]), slice(0, 5, 2)): t2 = t.copy() t2.remove_rows(cut) assert_col_equal(t2['a'], [2, 4]) assert np.all(t2.indices[0].sorted_data() == [0, 1]) with pytest.raises(ValueError): t.remove_rows((0, 2, 4)) def test_col_get_slice(self, main_col, table_types, engine): self._setup(main_col, table_types) t = self.t t.add_index('a', engine=engine) # get slice t2 = t[1:3] # table slice assert_col_equal(t2['a'], [2, 3]) assert np.all(t2.indices[0].sorted_data() == [0, 1]) col_slice = t['a'][1:3] assert_col_equal(col_slice, [2, 3]) # true column slices discard indices if isinstance(t['a'], BaseColumn): assert len(col_slice.info.indices) == 0 # take slice of slice t2 = t[::2] assert_col_equal(t2['a'], np.array([1, 3, 5])) t3 = t2[::-1] assert_col_equal(t3['a'], np.array([5, 3, 1])) assert np.all(t3.indices[0].sorted_data() == [2, 1, 0]) t3 = t2[:2] assert_col_equal(t3['a'], np.array([1, 3])) assert np.all(t3.indices[0].sorted_data() == [0, 1]) # out-of-bound slices for t_empty in (t2[3:], t2[2:1], t3[2:]): assert len(t_empty['a']) == 0 assert np.all(t_empty.indices[0].sorted_data() == []) if self.mutable: # get boolean mask mask = t['a'] % 2 == 1 t2 = t[mask] assert_col_equal(t2['a'], [1, 3, 5]) assert np.all(t2.indices[0].sorted_data() == [0, 1, 2]) def test_col_set_slice(self, main_col, table_types, engine): self._setup(main_col, table_types) if not self.mutable: return t = self.t t.add_index('a', engine=engine) # set slice t2 = t.copy() t2['a'][1:3] = np.array([6, 7]) assert_col_equal(t2['a'], np.array([1, 6, 7, 4, 5])) assert np.all(t2.indices[0].sorted_data() == [0, 3, 4, 1, 2]) # change original table via slice reference t2 = t.copy() t3 = t2[1:3] assert_col_equal(t3['a'], np.array([2, 3])) assert np.all(t3.indices[0].sorted_data() == [0, 1]) t3['a'][0] = 5 assert_col_equal(t3['a'], np.array([5, 3])) assert_col_equal(t2['a'], np.array([1, 5, 3, 4, 5])) assert np.all(t3.indices[0].sorted_data() == [1, 0]) assert np.all(t2.indices[0].sorted_data() == [0, 2, 3, 1, 4]) # set boolean mask t2 = t.copy() mask = t['a'] % 2 == 1 t2['a'][mask] = 0. 
assert_col_equal(t2['a'], [0, 2, 0, 4, 0]) assert np.all(t2.indices[0].sorted_data() == [0, 2, 4, 1, 3]) def test_multiple_slices(self, main_col, table_types, engine): self._setup(main_col, table_types) if not self.mutable: return t = self.t t.add_index('a', engine=engine) for i in range(6, 51): t.add_row((i, 1.0, 'A')) assert_col_equal(t['a'], [i for i in range(1, 51)]) assert np.all(t.indices[0].sorted_data() == [i for i in range(50)]) evens = t[::2] assert np.all(evens.indices[0].sorted_data() == [i for i in range(25)]) reverse = evens[::-1] index = reverse.indices[0] assert (index.start, index.stop, index.step) == (48, -2, -2) assert np.all(index.sorted_data() == [i for i in range(24, -1, -1)]) # modify slice of slice reverse[-10:] = 0 expected = np.array([i for i in range(1, 51)]) expected[:20][expected[:20] % 2 == 1] = 0 assert_col_equal(t['a'], expected) assert_col_equal(evens['a'], expected[::2]) assert_col_equal(reverse['a'], expected[::2][::-1]) # first ten evens are now zero assert np.all(t.indices[0].sorted_data() == [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19] + [i for i in range(20, 50)]) assert np.all(evens.indices[0].sorted_data() == [i for i in range(25)]) assert np.all(reverse.indices[0].sorted_data() == [i for i in range(24, -1, -1)]) # try different step sizes of slice t2 = t[1:20:2] assert_col_equal(t2['a'], [2, 4, 6, 8, 10, 12, 14, 16, 18, 20]) assert np.all(t2.indices[0].sorted_data() == [i for i in range(10)]) t3 = t2[::3] assert_col_equal(t3['a'], [2, 8, 14, 20]) assert np.all(t3.indices[0].sorted_data() == [0, 1, 2, 3]) t4 = t3[2::-1] assert_col_equal(t4['a'], [14, 8, 2]) assert np.all(t4.indices[0].sorted_data() == [2, 1, 0]) def test_sort(self, main_col, table_types, engine): self._setup(main_col, table_types) t = self.t[::-1] # reverse table assert_col_equal(t['a'], [5, 4, 3, 2, 1]) t.add_index('a', engine=engine) assert np.all(t.indices[0].sorted_data() == [4, 3, 2, 1, 0]) if not self.mutable: return # sort table by column a t2 = t.copy() t2.sort('a') assert_col_equal(t2['a'], [1, 2, 3, 4, 5]) assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3, 4]) # sort table by primary key t2 = t.copy() t2.sort() assert_col_equal(t2['a'], [1, 2, 3, 4, 5]) assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3, 4]) def test_insert_row(self, main_col, table_types, engine): self._setup(main_col, table_types) if not self.mutable: return t = self.t t.add_index('a', engine=engine) t.insert_row(2, (6, 1.0, '12')) assert_col_equal(t['a'], [1, 2, 6, 3, 4, 5]) assert np.all(t.indices[0].sorted_data() == [0, 1, 3, 4, 5, 2]) t.insert_row(1, (0, 4.0, '13')) assert_col_equal(t['a'], [1, 0, 2, 6, 3, 4, 5]) assert np.all(t.indices[0].sorted_data() == [1, 0, 2, 4, 5, 6, 3]) def test_index_modes(self, main_col, table_types, engine): self._setup(main_col, table_types) t = self.t t.add_index('a', engine=engine) # first, no special mode assert len(t[[1, 3]].indices) == 1 assert len(t[::-1].indices) == 1 assert len(self._table_type(t).indices) == 1 assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4]) t2 = t.copy() # non-copy mode with t.index_mode('discard_on_copy'): assert len(t[[1, 3]].indices) == 0 assert len(t[::-1].indices) == 0 assert len(self._table_type(t).indices) == 0 assert len(t2.copy().indices) == 1 # mode should only affect t # make sure non-copy mode is exited correctly assert len(t[[1, 3]].indices) == 1 if not self.mutable: return # non-modify mode with t.index_mode('freeze'): assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4]) 
t['a'][0] = 6 assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4]) t.add_row((2, 1.5, '12')) assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4]) t.remove_rows([1, 3]) assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4]) assert_col_equal(t['a'], [6, 3, 5, 2]) # mode should only affect t assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3, 4]) t2['a'][0] = 6 assert np.all(t2.indices[0].sorted_data() == [1, 2, 3, 4, 0]) # make sure non-modify mode is exited correctly assert np.all(t.indices[0].sorted_data() == [3, 1, 2, 0]) if isinstance(t['a'], BaseColumn): assert len(t['a'][::-1].info.indices) == 0 with t.index_mode('copy_on_getitem'): assert len(t['a'][[1, 2]].info.indices) == 1 # mode should only affect t assert len(t2['a'][[1, 2]].info.indices) == 0 assert len(t['a'][::-1].info.indices) == 0 assert len(t2['a'][::-1].info.indices) == 0 def test_index_retrieval(self, main_col, table_types, engine): self._setup(main_col, table_types) t = self.t t.add_index('a', engine=engine) t.add_index(['a', 'c'], engine=engine) assert len(t.indices) == 2 assert len(t.indices['a'].columns) == 1 assert len(t.indices['a', 'c'].columns) == 2 with pytest.raises(IndexError): t.indices['b'] def test_col_rename(self, main_col, table_types, engine): ''' Checks for a previous bug in which copying a Table with different column names raised an exception. ''' self._setup(main_col, table_types) t = self.t t.add_index('a', engine=engine) t2 = self._table_type(self.t, names=['d', 'e', 'f']) assert len(t2.indices) == 1 def test_table_loc(self, main_col, table_types, engine): self._setup(main_col, table_types) t = self.t t.add_index('a', engine=engine) t.add_index('b', engine=engine) t2 = t.loc[self.make_val(3)] # single label, with primary key 'a' assert_col_equal(t2['a'], [3]) assert isinstance(t2, Row) # list search t2 = t.loc[[self.make_val(1), self.make_val(4), self.make_val(2)]] assert_col_equal(t2['a'], [1, 4, 2]) # same order as input list if not isinstance(main_col, Time): # ndarray search t2 = t.loc[np.array([1, 4, 2])] assert_col_equal(t2['a'], [1, 4, 2]) assert_col_equal(t2['a'], [1, 4, 2]) t2 = t.loc[self.make_val(3): self.make_val(5)] # range search assert_col_equal(t2['a'], [3, 4, 5]) t2 = t.loc['b', 5.0:7.0] assert_col_equal(t2['b'], [5.1, 6.2, 7.0]) # search by sorted index t2 = t.iloc[0:2] # two smallest rows by column 'a' assert_col_equal(t2['a'], [1, 2]) t2 = t.iloc['b', 2:] # exclude two smallest rows in column 'b' assert_col_equal(t2['b'], [5.1, 6.2, 7.0]) for t2 in (t.loc[:], t.iloc[:]): assert_col_equal(t2['a'], [1, 2, 3, 4, 5]) def test_table_loc_indices(self, main_col, table_types, engine): self._setup(main_col, table_types) t = self.t t.add_index('a', engine=engine) t.add_index('b', engine=engine) t2 = t.loc_indices[self.make_val(3)] # single label, with primary key 'a' assert t2 == 2 # list search t2 = t.loc_indices[[self.make_val(1), self.make_val(4), self.make_val(2)]] for i, p in zip(t2,[1,4,2]): # same order as input list assert i == p-1 def test_invalid_search(self, main_col, table_types, engine): # using .loc and .loc_indices with a value not present should raise an exception self._setup(main_col, table_types) t = self.t t.add_index('a') with pytest.raises(KeyError): t.loc[self.make_val(6)] with pytest.raises(KeyError): t.loc_indices[self.make_val(6)] def test_copy_index_references(self, main_col, table_types, engine): # check against a bug in which indices were given an incorrect # column reference when copied self._setup(main_col, table_types) t = self.t 
        t.add_index('a')
        t.add_index('b')

        t2 = t.copy()

        assert t2.indices['a'].columns[0] is t2['a']
        assert t2.indices['b'].columns[0] is t2['b']

    def test_unique_index(self, main_col, table_types, engine):
        self._setup(main_col, table_types)
        t = self.t
        t.add_index('a', engine=engine, unique=True)
        assert np.all(t.indices['a'].sorted_data() == [0, 1, 2, 3, 4])

        if self.mutable:
            with pytest.raises(ValueError):
                t.add_row((5, 5.0, '9'))

    def test_copy_indexed_table(self, table_types):
        self._setup(_col, table_types)
        t = self.t
        t.add_index('a')
        t.add_index(['a', 'b'])
        for tp in (self._table_type(t), t.copy()):
            assert len(t.indices) == len(tp.indices)
            for index, indexp in zip(t.indices, tp.indices):
                assert np.all(index.data.data == indexp.data.data)
                assert index.data.data.colnames == indexp.data.data.colnames

    def test_updating_row_by_index(self, main_col, table_types, engine):
        self._setup(main_col, table_types)

        t = Table([['a', 'b', 'c', 'd'], [2, 3, 4, 5], [3, 4, 5, 6]],
                  names=('a', 'b', 'c'), meta={'name': 'first table'})

        t.add_index('a', engine=engine)
        t.add_index('b', engine=engine)

        t.loc['c'] = ['g', 40, 50]  # single label, with primary key 'a'
        t2 = t[2]
        assert list(t2) == ['g', 40, 50]

        # list search
        t.loc[['a', 'd', 'b']] = [['a', 20, 30], ['d', 50, 60], ['b', 30, 40]]
        t2 = [['a', 20, 30], ['d', 50, 60], ['b', 30, 40]]
        for i, p in zip(t2, [1, 4, 2]):  # same order as input list
            assert list(t[p - 1]) == i

    def test_invalid_update(self, main_col, table_types, engine):
        # updating with .loc using a badly shaped or badly sized value
        # should raise an exception
        self._setup(main_col, table_types)
        t = Table([[1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6]],
                  names=('a', 'b', 'c'), meta={'name': 'first table'})
        t.add_index('a')

        with pytest.raises(ValueError):
            t.loc[3] = [[1, 2, 3]]
        with pytest.raises(ValueError):
            t.loc[[1, 4, 2]] = [[1, 2, 3], [4, 5, 6]]
        with pytest.raises(ValueError):
            t.loc[[1, 4, 2]] = [[1, 2, 3], [4, 5, 6], [2, 3]]
        with pytest.raises(ValueError):
            t.loc[[1, 4, 2]] = [[1, 2, 3], [4, 5], [2, 3]]
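

# A condensed end-user view of the machinery tested above: once a key column
# is indexed, rows can be retrieved by label (.loc) or by sorted position
# (.iloc).  Plain int data keeps the sketch simple.
def test_index_usage_sketch(engine):
    t = Table([[3, 1, 2], ['c', 'a', 'b']], names=('key', 'val'))
    t.add_index('key', engine=engine)
    assert t.loc[2]['val'] == 'b'               # label lookup returns a Row
    assert list(t.iloc[0:2]['key']) == [1, 2]   # two smallest keys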
5689876df26b02b5f99e5d20f350bfe133ee9126c301dcc64ba2162dc689dab2
from os.path import abspath, dirname, join import textwrap import pytest from ..table import Table from ... import extern from ...utils.xml.writer import HAS_BLEACH try: import IPython # pylint: disable=W0611 except ImportError: HAS_IPYTHON = False else: HAS_IPYTHON = True EXTERN_DIR = abspath(dirname(extern.__file__)) REFERENCE = """ <html> <head> <meta charset="utf-8"/> <meta content="text/html;charset=UTF-8" http-equiv="Content-type"/> <style> body {font-family: sans-serif;} table.dataTable {width: auto !important; margin: 0 !important;} .dataTables_filter, .dataTables_paginate {float: left !important; margin-left:1em} </style> <link href="%(datatables_css_url)s" rel="stylesheet" type="text/css"/> <script src="%(jquery_url)s"> </script> <script src="%(datatables_js_url)s"> </script> </head> <body> <script> var astropy_sort_num = function(a, b) { var a_num = parseFloat(a); var b_num = parseFloat(b); if (isNaN(a_num) && isNaN(b_num)) return ((a < b) ? -1 : ((a > b) ? 1 : 0)); else if (!isNaN(a_num) && !isNaN(b_num)) return ((a_num < b_num) ? -1 : ((a_num > b_num) ? 1 : 0)); else return isNaN(a_num) ? -1 : 1; } jQuery.extend( jQuery.fn.dataTableExt.oSort, { "optionalnum-asc": astropy_sort_num, "optionalnum-desc": function (a,b) { return -astropy_sort_num(a, b); } }); $(document).ready(function() { $('#%(table_id)s').dataTable({ order: [], pageLength: %(length)s, lengthMenu: [[%(display_length)s, -1], [%(display_length)s, 'All']], pagingType: "full_numbers", columnDefs: [{targets: [0], type: "optionalnum"}] }); } ); </script> <table class="%(table_class)s" id="%(table_id)s"> <thead> <tr> <th>a</th> <th>b</th> </tr> </thead> %(lines)s </table> </body> </html> """ TPL = (' <tr>\n' ' <td>{0}</td>\n' ' <td>{1}</td>\n' ' </tr>') def format_lines(col1, col2): return '\n'.join(TPL.format(a, b) for a, b in zip(col1, col2)) def test_write_jsviewer_default(tmpdir): t = Table() t['a'] = [1, 2, 3, 4, 5] t['b'] = ['a', 'b', 'c', 'd', 'e'] t['a'].unit = 'm' tmpfile = tmpdir.join('test.html').strpath t.write(tmpfile, format='jsviewer') ref = REFERENCE % dict( lines=format_lines(t['a'], t['b']), table_class='display compact', table_id='table%s' % id(t), length='50', display_length='10, 25, 50, 100, 500, 1000', datatables_css_url='https://cdn.datatables.net/1.10.12/css/jquery.dataTables.css', datatables_js_url='https://cdn.datatables.net/1.10.12/js/jquery.dataTables.min.js', jquery_url='https://code.jquery.com/jquery-3.1.1.min.js' ) with open(tmpfile) as f: assert f.read().strip() == ref.strip() @pytest.mark.skipif('not HAS_BLEACH') def test_write_jsviewer_options(tmpdir): t = Table() t['a'] = [1, 2, 3, 4, 5] t['b'] = ['<b>a</b>', 'b', 'c', 'd', 'e'] t['a'].unit = 'm' tmpfile = tmpdir.join('test.html').strpath t.write(tmpfile, format='jsviewer', table_id='test', max_lines=3, jskwargs={'display_length': 5}, table_class='display hover', htmldict=dict(raw_html_cols='b')) ref = REFERENCE % dict( lines=format_lines(t['a'][:3], t['b'][:3]), table_class='display hover', table_id='test', length='5', display_length='5, 10, 25, 50, 100, 500, 1000', datatables_css_url='https://cdn.datatables.net/1.10.12/css/jquery.dataTables.css', datatables_js_url='https://cdn.datatables.net/1.10.12/js/jquery.dataTables.min.js', jquery_url='https://code.jquery.com/jquery-3.1.1.min.js' ) with open(tmpfile) as f: assert f.read().strip() == ref.strip() def test_write_jsviewer_local(tmpdir): t = Table() t['a'] = [1, 2, 3, 4, 5] t['b'] = ['a', 'b', 'c', 'd', 'e'] t['a'].unit = 'm' tmpfile = tmpdir.join('test.html').strpath 
t.write(tmpfile, format='jsviewer', table_id='test', jskwargs={'use_local_files': True}) ref = REFERENCE % dict( lines=format_lines(t['a'], t['b']), table_class='display compact', table_id='test', length='50', display_length='10, 25, 50, 100, 500, 1000', datatables_css_url='file://' + join(EXTERN_DIR, 'css', 'jquery.dataTables.css'), datatables_js_url='file://' + join(EXTERN_DIR, 'js', 'jquery.dataTables.min.js'), jquery_url='file://' + join(EXTERN_DIR, 'js', 'jquery-3.1.1.min.js') ) with open(tmpfile) as f: assert f.read().strip() == ref.strip() @pytest.mark.skipif('not HAS_IPYTHON') def test_show_in_notebook(): t = Table() t['a'] = [1, 2, 3, 4, 5] t['b'] = ['b', 'c', 'a', 'd', 'e'] htmlstr_windx = t.show_in_notebook().data # should default to 'idx' htmlstr_windx_named = t.show_in_notebook(show_row_index='realidx').data htmlstr_woindx = t.show_in_notebook(show_row_index=False).data assert (textwrap.dedent(""" <thead><tr><th>idx</th><th>a</th><th>b</th></tr></thead> <tr><td>0</td><td>1</td><td>b</td></tr> <tr><td>1</td><td>2</td><td>c</td></tr> <tr><td>2</td><td>3</td><td>a</td></tr> <tr><td>3</td><td>4</td><td>d</td></tr> <tr><td>4</td><td>5</td><td>e</td></tr> """).strip() in htmlstr_windx) assert '<thead><tr><th>realidx</th><th>a</th><th>b</th></tr></thead>' in htmlstr_windx_named assert '<thead><tr><th>a</th><th>b</th></tr></thead>' in htmlstr_woindx
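

# A minimal sketch combining the writer options exercised above: ``max_lines``
# truncates the table while ``jskwargs`` configures the viewer itself.  Only
# the call shape matters here, so the assertion is deliberately loose.
def test_write_jsviewer_options_sketch(tmpdir):
    t = Table({'x': [1, 2, 3]})
    tmpfile = tmpdir.join('sketch.html').strpath
    t.write(tmpfile, format='jsviewer', table_id='sketchtable',
            max_lines=2, jskwargs={'display_length': 25})
    with open(tmpfile) as f:
        assert 'sketchtable' in f.read()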
c67998bc4cfad931c8f7fe00d893cd45473027341e2d52b6aef168023e3752e6
# Licensed under a 3-clause BSD style license - see LICENSE.rst from collections import OrderedDict, Mapping, UserDict import pytest import numpy as np from ...table import Column, TableColumns class TestTableColumnsInit(): def test_init(self): """Test initialisation with lists, tuples, dicts of arrays rather than Columns [regression test for #2647]""" x1 = np.arange(10.) x2 = np.arange(5.) x3 = np.arange(7.) col_list = [('x1', x1), ('x2', x2), ('x3', x3)] tc_list = TableColumns(col_list) for col in col_list: assert col[0] in tc_list assert tc_list[col[0]] is col[1] col_tuple = (('x1', x1), ('x2', x2), ('x3', x3)) tc_tuple = TableColumns(col_tuple) for col in col_tuple: assert col[0] in tc_tuple assert tc_tuple[col[0]] is col[1] col_dict = dict([('x1', x1), ('x2', x2), ('x3', x3)]) tc_dict = TableColumns(col_dict) for col in tc_dict.keys(): assert col in tc_dict assert tc_dict[col] is col_dict[col] columns = [Column(col[1], name=col[0]) for col in col_list] tc = TableColumns(columns) for col in columns: assert col.name in tc assert tc[col.name] is col # pytest.mark.usefixtures('table_type') class BaseInitFrom(): def _setup(self, table_type): pass def test_basic_init(self, table_type): self._setup(table_type) t = table_type(self.data, names=('a', 'b', 'c')) assert t.colnames == ['a', 'b', 'c'] assert np.all(t['a'] == np.array([1, 3])) assert np.all(t['b'] == np.array([2, 4])) assert np.all(t['c'] == np.array([3, 5])) assert all(t[name].name == name for name in t.colnames) def test_set_dtype(self, table_type): self._setup(table_type) t = table_type(self.data, names=('a', 'b', 'c'), dtype=('i4', 'f4', 'f8')) assert t.colnames == ['a', 'b', 'c'] assert np.all(t['a'] == np.array([1, 3], dtype='i4')) assert np.all(t['b'] == np.array([2, 4], dtype='f4')) assert np.all(t['c'] == np.array([3, 5], dtype='f8')) assert t['a'].dtype.type == np.int32 assert t['b'].dtype.type == np.float32 assert t['c'].dtype.type == np.float64 assert all(t[name].name == name for name in t.colnames) def test_names_dtype_mismatch(self, table_type): self._setup(table_type) with pytest.raises(ValueError): table_type(self.data, names=('a',), dtype=('i4', 'f4', 'i4')) def test_names_cols_mismatch(self, table_type): self._setup(table_type) with pytest.raises(ValueError): table_type(self.data, names=('a',), dtype=('i4')) @pytest.mark.usefixtures('table_type') class BaseInitFromListLike(BaseInitFrom): def test_names_cols_mismatch(self, table_type): self._setup(table_type) with pytest.raises(ValueError): table_type(self.data, names=['a'], dtype=[int]) def test_names_copy_false(self, table_type): self._setup(table_type) with pytest.raises(ValueError): table_type(self.data, names=['a'], dtype=[int], copy=False) @pytest.mark.usefixtures('table_type') class BaseInitFromDictLike(BaseInitFrom): pass @pytest.mark.usefixtures('table_type') class TestInitFromNdarrayHomo(BaseInitFromListLike): def setup_method(self, method): self.data = np.array([(1, 2, 3), (3, 4, 5)], dtype='i4') def test_default_names(self, table_type): self._setup(table_type) t = table_type(self.data) assert t.colnames == ['col0', 'col1', 'col2'] def test_ndarray_ref(self, table_type): """Init with ndarray and copy=False and show that this is a reference to input ndarray""" self._setup(table_type) t = table_type(self.data, copy=False) t['col1'][1] = 0 assert t.as_array()['col1'][1] == 0 assert t['col1'][1] == 0 assert self.data[1][1] == 0 def test_partial_names_dtype(self, table_type): self._setup(table_type) t = table_type(self.data, names=['a', None, 'c'], 
dtype=[None, None, 'f8']) assert t.colnames == ['a', 'col1', 'c'] assert t['a'].dtype.type == np.int32 assert t['col1'].dtype.type == np.int32 assert t['c'].dtype.type == np.float64 assert all(t[name].name == name for name in t.colnames) def test_partial_names_ref(self, table_type): self._setup(table_type) t = table_type(self.data, names=['a', None, 'c']) assert t.colnames == ['a', 'col1', 'c'] assert t['a'].dtype.type == np.int32 assert t['col1'].dtype.type == np.int32 assert t['c'].dtype.type == np.int32 assert all(t[name].name == name for name in t.colnames) @pytest.mark.usefixtures('table_type') class TestInitFromListOfLists(BaseInitFromListLike): def setup_method(self, table_type): self._setup(table_type) self.data = [(np.int32(1), np.int32(3)), Column(name='col1', data=[2, 4], dtype=np.int32), np.array([3, 5], dtype=np.int32)] def test_default_names(self, table_type): self._setup(table_type) t = table_type(self.data) assert t.colnames == ['col0', 'col1', 'col2'] assert all(t[name].name == name for name in t.colnames) def test_partial_names_dtype(self, table_type): self._setup(table_type) t = table_type(self.data, names=['b', None, 'c'], dtype=['f4', None, 'f8']) assert t.colnames == ['b', 'col1', 'c'] assert t['b'].dtype.type == np.float32 assert t['col1'].dtype.type == np.int32 assert t['c'].dtype.type == np.float64 assert all(t[name].name == name for name in t.colnames) def test_bad_data(self, table_type): self._setup(table_type) with pytest.raises(ValueError): table_type([[1, 2], [3, 4, 5]]) @pytest.mark.usefixtures('table_type') class TestInitFromListOfDicts(BaseInitFromListLike): def _setup(self, table_type): self.data = [{'a': 1, 'b': 2, 'c': 3}, {'a': 3, 'b': 4, 'c': 5}] def test_names(self, table_type): self._setup(table_type) t = table_type(self.data) assert all(colname in set(['a', 'b', 'c']) for colname in t.colnames) def test_names_ordered(self, table_type): self._setup(table_type) t = table_type(self.data, names=('c', 'b', 'a')) assert t.colnames == ['c', 'b', 'a'] def test_bad_data(self, table_type): self._setup(table_type) with pytest.raises(ValueError): table_type([{'a': 1, 'b': 2, 'c': 3}, {'a': 2, 'b': 4}]) @pytest.mark.usefixtures('table_type') class TestInitFromColsList(BaseInitFromListLike): def _setup(self, table_type): self.data = [Column([1, 3], name='x', dtype=np.int32), np.array([2, 4], dtype=np.int32), np.array([3, 5], dtype='i8')] def test_default_names(self, table_type): self._setup(table_type) t = table_type(self.data) assert t.colnames == ['x', 'col1', 'col2'] assert all(t[name].name == name for name in t.colnames) def test_partial_names_dtype(self, table_type): self._setup(table_type) t = table_type(self.data, names=['b', None, 'c'], dtype=['f4', None, 'f8']) assert t.colnames == ['b', 'col1', 'c'] assert t['b'].dtype.type == np.float32 assert t['col1'].dtype.type == np.int32 assert t['c'].dtype.type == np.float64 assert all(t[name].name == name for name in t.colnames) def test_ref(self, table_type): """Test that initializing from a list of columns can be done by reference""" self._setup(table_type) t = table_type(self.data, copy=False) t['x'][0] = 100 assert self.data[0][0] == 100 @pytest.mark.usefixtures('table_type') class TestInitFromNdarrayStruct(BaseInitFromDictLike): def _setup(self, table_type): self.data = np.array([(1, 2, 3), (3, 4, 5)], dtype=[(str('x'), 'i8'), (str('y'), 'i4'), (str('z'), 'i8')]) def test_ndarray_ref(self, table_type): """Init with ndarray and copy=False and show that table uses reference to input ndarray""" 
self._setup(table_type) t = table_type(self.data, copy=False) t['x'][1] = 0 # Column-wise assignment t[0]['y'] = 0 # Row-wise assignment assert self.data['x'][1] == 0 assert self.data['y'][0] == 0 assert np.all(np.array(t) == self.data) assert all(t[name].name == name for name in t.colnames) def test_partial_names_dtype(self, table_type): self._setup(table_type) t = table_type(self.data, names=['e', None, 'd'], dtype=['f4', None, 'f8']) assert t.colnames == ['e', 'y', 'd'] assert t['e'].dtype.type == np.float32 assert t['y'].dtype.type == np.int32 assert t['d'].dtype.type == np.float64 assert all(t[name].name == name for name in t.colnames) def test_partial_names_ref(self, table_type): self._setup(table_type) t = table_type(self.data, names=['e', None, 'd'], copy=False) assert t.colnames == ['e', 'y', 'd'] assert t['e'].dtype.type == np.int64 assert t['y'].dtype.type == np.int32 assert t['d'].dtype.type == np.int64 assert all(t[name].name == name for name in t.colnames) @pytest.mark.usefixtures('table_type') class TestInitFromDict(BaseInitFromDictLike): def _setup(self, table_type): self.data = dict([('a', Column([1, 3], name='x')), ('b', [2, 4]), ('c', np.array([3, 5], dtype='i8'))]) @pytest.mark.usefixtures('table_type') class TestInitFromMapping(BaseInitFromDictLike): def _setup(self, table_type): self.data = UserDict([('a', Column([1, 3], name='x')), ('b', [2, 4]), ('c', np.array([3, 5], dtype='i8'))]) assert isinstance(self.data, Mapping) assert not isinstance(self.data, dict) @pytest.mark.usefixtures('table_type') class TestInitFromOrderedDict(BaseInitFromDictLike): def _setup(self, table_type): self.data = OrderedDict([('a', Column(name='x', data=[1, 3])), ('b', [2, 4]), ('c', np.array([3, 5], dtype='i8'))]) def test_col_order(self, table_type): self._setup(table_type) t = table_type(self.data) assert t.colnames == ['a', 'b', 'c'] @pytest.mark.usefixtures('table_type') class TestInitFromRow(BaseInitFromDictLike): def _setup(self, table_type): arr = np.array([(1, 2, 3), (3, 4, 5)], dtype=[(str('x'), 'i8'), (str('y'), 'i8'), (str('z'), 'f8')]) self.data = table_type(arr, meta={'comments': ['comment1', 'comment2']}) def test_init_from_row(self, table_type): self._setup(table_type) t = table_type(self.data[0]) # Values and meta match original assert t.meta['comments'][0] == 'comment1' for name in t.colnames: assert np.all(t[name] == self.data[name][0:1]) assert all(t[name].name == name for name in t.colnames) # Change value in new instance and check that original is the same t['x'][0] = 8 t.meta['comments'][1] = 'new comment2' assert np.all(t['x'] == np.array([8])) assert np.all(self.data['x'] == np.array([1, 3])) assert self.data.meta['comments'][1] == 'comment2' @pytest.mark.usefixtures('table_type') class TestInitFromTable(BaseInitFromDictLike): def _setup(self, table_type): arr = np.array([(1, 2, 3), (3, 4, 5)], dtype=[(str('x'), 'i8'), (str('y'), 'i8'), (str('z'), 'f8')]) self.data = table_type(arr, meta={'comments': ['comment1', 'comment2']}) def test_data_meta_copy(self, table_type): self._setup(table_type) t = table_type(self.data) assert t.meta['comments'][0] == 'comment1' t['x'][1] = 8 t.meta['comments'][1] = 'new comment2' assert self.data.meta['comments'][1] == 'comment2' assert np.all(t['x'] == np.array([1, 8])) assert np.all(self.data['x'] == np.array([1, 3])) assert t['z'].name == 'z' assert all(t[name].name == name for name in t.colnames) def test_table_ref(self, table_type): self._setup(table_type) t = table_type(self.data, copy=False) t['x'][1] = 0 assert t['x'][1] == 
0 assert self.data['x'][1] == 0 assert np.all(t.as_array() == self.data.as_array()) assert all(t[name].name == name for name in t.colnames) def test_partial_names_dtype(self, table_type): self._setup(table_type) t = table_type(self.data, names=['e', None, 'd'], dtype=['f4', None, 'i8']) assert t.colnames == ['e', 'y', 'd'] assert t['e'].dtype.type == np.float32 assert t['y'].dtype.type == np.int64 assert t['d'].dtype.type == np.int64 assert all(t[name].name == name for name in t.colnames) def test_partial_names_ref(self, table_type): self._setup(table_type) t = table_type(self.data, names=['e', None, 'd'], copy=False) assert t.colnames == ['e', 'y', 'd'] assert t['e'].dtype.type == np.int64 assert t['y'].dtype.type == np.int64 assert t['d'].dtype.type == np.float64 assert all(t[name].name == name for name in t.colnames) def test_init_from_columns(self, table_type): self._setup(table_type) t = table_type(self.data) t2 = table_type(t.columns['z', 'x', 'y']) assert t2.colnames == ['z', 'x', 'y'] assert t2.dtype.names == ('z', 'x', 'y') def test_init_from_columns_slice(self, table_type): self._setup(table_type) t = table_type(self.data) t2 = table_type(t.columns[0:2]) assert t2.colnames == ['x', 'y'] assert t2.dtype.names == ('x', 'y') def test_init_from_columns_mix(self, table_type): self._setup(table_type) t = table_type(self.data) t2 = table_type([t.columns[0], t.columns['z']]) assert t2.colnames == ['x', 'z'] assert t2.dtype.names == ('x', 'z') @pytest.mark.usefixtures('table_type') class TestInitFromNone(): # Note table_table.TestEmptyData tests initializing a completely empty # table and adding data. def test_data_none_with_cols(self, table_type): """ Test different ways of initing an empty table """ np_t = np.empty(0, dtype=[(str('a'), 'f4', (2,)), (str('b'), 'i4')]) for kwargs in ({'names': ('a', 'b')}, {'names': ('a', 'b'), 'dtype': (('f4', (2,)), 'i4')}, {'dtype': [(str('a'), 'f4', (2,)), (str('b'), 'i4')]}, {'dtype': np_t.dtype}): t = table_type(**kwargs) assert t.colnames == ['a', 'b'] assert len(t['a']) == 0 assert len(t['b']) == 0 if 'dtype' in kwargs: assert t['a'].dtype.type == np.float32 assert t['b'].dtype.type == np.int32 assert t['a'].shape[1:] == (2,) @pytest.mark.usefixtures('table_types') class TestInitFromRows(): def test_init_with_rows(self, table_type): for rows in ([[1, 'a'], [2, 'b']], [(1, 'a'), (2, 'b')], ((1, 'a'), (2, 'b'))): t = table_type(rows=rows, names=('a', 'b')) assert np.all(t['a'] == [1, 2]) assert np.all(t['b'] == ['a', 'b']) assert t.colnames == ['a', 'b'] assert t['a'].dtype.kind == 'i' assert t['b'].dtype.kind in ('S', 'U') # Regression test for # https://github.com/astropy/astropy/issues/3052 assert t['b'].dtype.str.endswith('1') rows = np.arange(6).reshape(2, 3) t = table_type(rows=rows, names=('a', 'b', 'c'), dtype=['f8', 'f4', 'i8']) assert np.all(t['a'] == [0, 3]) assert np.all(t['b'] == [1, 4]) assert np.all(t['c'] == [2, 5]) assert t.colnames == ['a', 'b', 'c'] assert t['a'].dtype.str.endswith('f8') assert t['b'].dtype.str.endswith('f4') assert t['c'].dtype.str.endswith('i8') def test_init_with_rows_and_data(self, table_type): with pytest.raises(ValueError) as err: table_type(data=[[1]], rows=[[1]]) assert "Cannot supply both `data` and `rows` values" in str(err) @pytest.mark.usefixtures('table_type') def test_init_and_ref_from_multidim_ndarray(table_type): """ Test that initializing from an ndarray structured array with a multi-dim column works for both copy=False and True and that the referencing is as expected. 
""" for copy in (False, True): nd = np.array([(1, [10, 20]), (3, [30, 40])], dtype=[(str('a'), 'i8'), (str('b'), 'i8', (2,))]) t = table_type(nd, copy=copy) assert t.colnames == ['a', 'b'] assert t['a'].shape == (2,) assert t['b'].shape == (2, 2) t['a'][0] = -200 t['b'][1][1] = -100 if copy: assert nd[str('a')][0] == 1 assert nd[str('b')][1][1] == 40 else: assert nd[str('a')][0] == -200 assert nd[str('b')][1][1] == -100
fc412da36ace23b6257053b5f9fcd6cf2e19d52888a30613861b0862494f1a7c
import os import re from ..scripts import showtable from ...utils.compat import NUMPY_LT_1_14 ROOT = os.path.abspath(os.path.dirname(__file__)) ASCII_ROOT = os.path.join(ROOT, '..', '..', 'io', 'ascii', 'tests') FITS_ROOT = os.path.join(ROOT, '..', '..', 'io', 'fits', 'tests') VOTABLE_ROOT = os.path.join(ROOT, '..', '..', 'io', 'votable', 'tests') def test_missing_file(capsys): showtable.main(['foobar.fits']) out, err = capsys.readouterr() assert err.startswith("ERROR: [Errno 2] No such file or directory: " "'foobar.fits'") def test_info(capsys): showtable.main([os.path.join(FITS_ROOT, 'data/table.fits'), '--info']) out, err = capsys.readouterr() assert out.splitlines() == ['<Table length=3>', ' name dtype ', '------ -------', 'target bytes20', ' V_mag float32'] def test_stats(capsys): showtable.main([os.path.join(FITS_ROOT, 'data/table.fits'), '--stats']) out, err = capsys.readouterr() if NUMPY_LT_1_14: expected = ('<Table length=3>{0}' ' name mean std min max {0}' '------ ------- ------- ---- ----{0}' 'target -- -- -- --{0}' ' V_mag 12.8667 1.72111 11.1 15.2{0}') else: expected = ('<Table length=3>{0}' ' name mean std min max {0}' '------ --------- --------- ---- ----{0}' 'target -- -- -- --{0}' ' V_mag 12.86666[0-9]? 1.7211105 11.1 15.2{0}') # Here we use re.match as in some cases one of the values above is # platform-dependent. assert re.match(expected.format(os.linesep), out) is not None def test_fits(capsys): showtable.main([os.path.join(FITS_ROOT, 'data/table.fits')]) out, err = capsys.readouterr() assert out.splitlines() == [' target V_mag', '------- -----', 'NGC1001 11.1', 'NGC1002 12.3', 'NGC1003 15.2'] def test_fits_hdu(capsys): showtable.main([os.path.join(FITS_ROOT, 'data/zerowidth.fits'), '--hdu', 'AIPS OF']) out, err = capsys.readouterr() if NUMPY_LT_1_14: assert out.startswith( ' TIME SOURCE ID ANTENNA NO. SUBARRAY FREQ ID ANT FLAG STATUS 1\n' ' DAYS \n' '-------- --------- ----------- -------- ------- -------- --------\n' '0.144387 1 10 1 1 4 4\n') else: assert out.startswith( ' TIME SOURCE ID ANTENNA NO. SUBARRAY FREQ ID ANT FLAG STATUS 1\n' ' DAYS \n' '---------- --------- ----------- -------- ------- -------- --------\n' '0.14438657 1 10 1 1 4 4\n') def test_csv(capsys): showtable.main([os.path.join(ASCII_ROOT, 't/simple_csv.csv')]) out, err = capsys.readouterr() assert out.splitlines() == [' a b c ', '--- --- ---', ' 1 2 3', ' 4 5 6'] def test_ascii_format(capsys): showtable.main([os.path.join(ASCII_ROOT, 't/commented_header.dat'), '--format', 'ascii.commented_header']) out, err = capsys.readouterr() assert out.splitlines() == [' a b c ', '--- --- ---', ' 1 2 3', ' 4 5 6'] def test_ascii_delimiter(capsys): showtable.main([os.path.join(ASCII_ROOT, 't/simple2.txt'), '--format', 'ascii', '--delimiter', '|']) out, err = capsys.readouterr() assert out.splitlines() == [ "obsid redshift X Y object rad ", "----- -------- ---- ---- ----------- ----", " 3102 0.32 4167 4085 Q1250+568-A 9.0", " 3102 0.32 4706 3916 Q1250+568-B 14.0", " 877 0.22 4378 3892 'Source 82' 12.5", ] def test_votable(capsys): showtable.main([os.path.join(VOTABLE_ROOT, 'data/regression.xml'), '--table-id', 'main_table', '--max-width', '50']) out, err = capsys.readouterr() assert out.splitlines() == [ ' string_test string_test_2 ... bitarray2 [16]', '----------------- ------------- ... --------------', ' String & test Fixed stri ... True .. False', 'String &amp; test 0123456789 ... -- .. --', ' XXXX XXXX ... -- .. --', ' ... -- .. --', ' ... -- .. 
--', ] def test_max_lines(capsys): showtable.main([os.path.join(ASCII_ROOT, 't/cds2.dat'), '--format', 'ascii.cds', '--max-lines', '7', '--max-width', '30']) out, err = capsys.readouterr() assert out.splitlines() == [ ' SST ... Note', ' ... ', '--------------- ... ----', '041314.1+281910 ... --', ' ... ... ...', '044427.1+251216 ... --', '044642.6+245903 ... --', 'Length = 215 rows', ] def test_show_dtype(capsys): showtable.main([os.path.join(FITS_ROOT, 'data/table.fits'), '--show-dtype']) out, err = capsys.readouterr() assert out.splitlines() == [ ' target V_mag ', 'bytes20 float32', '------- -------', 'NGC1001 11.1', 'NGC1002 12.3', 'NGC1003 15.2', ] def test_hide_unit(capsys): showtable.main([os.path.join(ASCII_ROOT, 't/cds.dat'), '--format', 'ascii.cds']) out, err = capsys.readouterr() assert out.splitlines() == [ 'Index RAh RAm RAs DE- DEd DEm DEs Match Class AK Fit ', ' h min s deg arcmin arcsec mag ', '----- --- --- ----- --- --- ------ ------ ----- ----- --- ----', ' 1 3 28 39.09 + 31 6 1.9 -- I* -- 1.35', ] showtable.main([os.path.join(ASCII_ROOT, 't/cds.dat'), '--format', 'ascii.cds', '--hide-unit']) out, err = capsys.readouterr() assert out.splitlines() == [ 'Index RAh RAm RAs DE- DEd DEm DEs Match Class AK Fit ', '----- --- --- ----- --- --- --- --- ----- ----- --- ----', ' 1 3 28 39.09 + 31 6 1.9 -- I* -- 1.35', ]
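

# A minimal sketch showing that the flags exercised above combine freely;
# nothing is asserted about the exact layout, only that output is produced.
def test_combined_flags_sketch(capsys):
    showtable.main([os.path.join(ASCII_ROOT, 't/simple_csv.csv'),
                    '--format', 'ascii.csv', '--max-width', '30'])
    out, err = capsys.readouterr()
    assert out.strip() != ''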
d3b39d372f96e28929fd7375afced7d3ebb3a1490b865425069d7496d1086345
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import pytest
import numpy as np

from ...tests.helper import catch_warnings
from ...table import Table, Column
from ...utils.exceptions import AstropyUserWarning


def sort_eq(list1, list2):
    return sorted(list1) == sorted(list2)


def test_column_group_by(T1):
    for masked in (False, True):
        t1 = Table(T1, masked=masked)
        t1a = t1['a'].copy()

        # Group by a Column (i.e. numpy array)
        t1ag = t1a.group_by(t1['a'])
        assert np.all(t1ag.groups.indices == np.array([0, 1, 4, 8]))

        # Group by a Table
        t1ag = t1a.group_by(t1['a', 'b'])
        assert np.all(t1ag.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8]))

        # Group by a numpy structured array
        t1ag = t1a.group_by(t1['a', 'b'].as_array())
        assert np.all(t1ag.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8]))


def test_table_group_by(T1):
    """
    Test basic table group_by functionality for possible key types and for
    masked/unmasked tables.
    """
    for masked in (False, True):
        t1 = Table(T1, masked=masked)

        # Group by a single column key specified by name
        tg = t1.group_by('a')
        assert np.all(tg.groups.indices == np.array([0, 1, 4, 8]))
        assert str(tg.groups) == "<TableGroups indices=[0 1 4 8]>"
        assert str(tg['a'].groups) == "<ColumnGroups indices=[0 1 4 8]>"

        # Sorted by 'a' and in original order for rest
        assert tg.pformat() == [' a   b   c   d ',
                                '--- --- --- ---',
                                '  0   a 0.0   4',
                                '  1   b 3.0   5',
                                '  1   a 2.0   6',
                                '  1   a 1.0   7',
                                '  2   c 7.0   0',
                                '  2   b 5.0   1',
                                '  2   b 6.0   2',
                                '  2   a 4.0   3']
        assert tg.meta['ta'] == 1
        assert tg['c'].meta['a'] == 1
        assert tg['c'].description == 'column c'

        # Group by a table column
        tg2 = t1.group_by(t1['a'])
        assert tg.pformat() == tg2.pformat()

        # Group by two columns spec'd by name
        for keys in (['a', 'b'], ('a', 'b')):
            tg = t1.group_by(keys)
            assert np.all(tg.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8]))

            # Sorted by 'a', 'b' and in original order for rest
            assert tg.pformat() == [' a   b   c   d ',
                                    '--- --- --- ---',
                                    '  0   a 0.0   4',
                                    '  1   a 2.0   6',
                                    '  1   a 1.0   7',
                                    '  1   b 3.0   5',
                                    '  2   a 4.0   3',
                                    '  2   b 5.0   1',
                                    '  2   b 6.0   2',
                                    '  2   c 7.0   0']

        # Group by a Table
        tg2 = t1.group_by(t1['a', 'b'])
        assert tg.pformat() == tg2.pformat()

        # Group by a structured array
        tg2 = t1.group_by(t1['a', 'b'].as_array())
        assert tg.pformat() == tg2.pformat()

        # Group by a simple ndarray
        tg = t1.group_by(np.array([0, 1, 0, 1, 2, 1, 0, 0]))
        assert np.all(tg.groups.indices == np.array([0, 4, 7, 8]))
        assert tg.pformat() == [' a   b   c   d ',
                                '--- --- --- ---',
                                '  2   c 7.0   0',
                                '  2   b 6.0   2',
                                '  1   a 2.0   6',
                                '  1   a 1.0   7',
                                '  2   b 5.0   1',
                                '  2   a 4.0   3',
                                '  1   b 3.0   5',
                                '  0   a 0.0   4']


def test_groups_keys(T1):
    tg = T1.group_by('a')
    keys = tg.groups.keys
    assert keys.dtype.names == ('a',)
    assert np.all(keys['a'] == np.array([0, 1, 2]))

    tg = T1.group_by(['a', 'b'])
    keys = tg.groups.keys
    assert keys.dtype.names == ('a', 'b')
    assert np.all(keys['a'] == np.array([0, 1, 1, 2, 2, 2]))
    assert np.all(keys['b'] == np.array(['a', 'a', 'b', 'a', 'b', 'c']))

    # Grouping by Column ignores column name
    tg = T1.group_by(T1['b'])
    keys = tg.groups.keys
    assert keys.dtype.names is None


def test_groups_iterator(T1):
    tg = T1.group_by('a')
    for ii, group in enumerate(tg.groups):
        assert group.pformat() == tg.groups[ii].pformat()
        assert group['a'][0] == tg['a'][tg.groups.indices[ii]]


def test_grouped_copy(T1):
    """
    Test that copying a table or column copies the groups properly.
    """
    for masked in (False, True):
        t1 = Table(T1, masked=masked)
        tg = t1.group_by('a')
        tgc = tg.copy()
        assert np.all(tgc.groups.indices == tg.groups.indices)
        assert np.all(tgc.groups.keys == tg.groups.keys)

        tac = tg['a'].copy()
        assert np.all(tac.groups.indices == tg['a'].groups.indices)

        c1 = t1['a'].copy()
        gc1 = c1.group_by(t1['a'])
        gc1c = gc1.copy()
        assert np.all(gc1c.groups.indices == np.array([0, 1, 4, 8]))


def test_grouped_slicing(T1):
    """
    Test that slicing a table removes previous grouping.
    """
    for masked in (False, True):
        t1 = Table(T1, masked=masked)

        # Regular slice of a table
        tg = t1.group_by('a')
        tg2 = tg[3:5]
        assert np.all(tg2.groups.indices == np.array([0, len(tg2)]))
        assert tg2.groups.keys is None


def test_group_column_from_table(T1):
    """
    Group a column that is part of a table.
    """
    cg = T1['c'].group_by(np.array(T1['a']))
    assert np.all(cg.groups.keys == np.array([0, 1, 2]))
    assert np.all(cg.groups.indices == np.array([0, 1, 4, 8]))


def test_table_groups_mask_index(T1):
    """
    Use a boolean mask as the item in __getitem__ for groups.
    """
    for masked in (False, True):
        t1 = Table(T1, masked=masked).group_by('a')

        t2 = t1.groups[np.array([True, False, True])]
        assert len(t2.groups) == 2
        assert t2.groups[0].pformat() == t1.groups[0].pformat()
        assert t2.groups[1].pformat() == t1.groups[2].pformat()
        assert np.all(t2.groups.keys['a'] == np.array([0, 2]))


def test_table_groups_array_index(T1):
    """
    Use a numpy array as the item in __getitem__ for groups.
    """
    for masked in (False, True):
        t1 = Table(T1, masked=masked).group_by('a')

        t2 = t1.groups[np.array([0, 2])]
        assert len(t2.groups) == 2
        assert t2.groups[0].pformat() == t1.groups[0].pformat()
        assert t2.groups[1].pformat() == t1.groups[2].pformat()
        assert np.all(t2.groups.keys['a'] == np.array([0, 2]))


def test_table_groups_slicing(T1):
    """
    Test that slicing table groups works.
    """
    for masked in (False, True):
        t1 = Table(T1, masked=masked).group_by('a')

        # slice(0, 2)
        t2 = t1.groups[0:2]
        assert len(t2.groups) == 2
        assert t2.groups[0].pformat() == t1.groups[0].pformat()
        assert t2.groups[1].pformat() == t1.groups[1].pformat()
        assert np.all(t2.groups.keys['a'] == np.array([0, 1]))

        # slice(1, 2)
        t2 = t1.groups[1:2]
        assert len(t2.groups) == 1
        assert t2.groups[0].pformat() == t1.groups[1].pformat()
        assert np.all(t2.groups.keys['a'] == np.array([1]))

        # slice(0, 3, 2)
        t2 = t1.groups[0:3:2]
        assert len(t2.groups) == 2
        assert t2.groups[0].pformat() == t1.groups[0].pformat()
        assert t2.groups[1].pformat() == t1.groups[2].pformat()
        assert np.all(t2.groups.keys['a'] == np.array([0, 2]))


def test_grouped_item_access(T1):
    """
    Test that column slicing preserves grouping.
    """
    for masked in (False, True):
        t1 = Table(T1, masked=masked)

        # Regular slice of a table
        tg = t1.group_by('a')
        tgs = tg['a', 'c', 'd']
        assert np.all(tgs.groups.keys == tg.groups.keys)
        assert np.all(tgs.groups.indices == tg.groups.indices)
        tgsa = tgs.groups.aggregate(np.sum)
        assert tgsa.pformat() == [' a   c    d ',
                                  '--- ---- ---',
                                  '  0  0.0   4',
                                  '  1  6.0  18',
                                  '  2 22.0   6']

        tgs = tg['c', 'd']
        assert np.all(tgs.groups.keys == tg.groups.keys)
        assert np.all(tgs.groups.indices == tg.groups.indices)
        tgsa = tgs.groups.aggregate(np.sum)
        assert tgsa.pformat() == [' c    d ',
                                  '---- ---',
                                  ' 0.0   4',
                                  ' 6.0  18',
                                  '22.0   6']


def test_mutable_operations(T1):
    """
    Operations like adding or deleting a row should remove grouping,
    but adding, removing, or renaming a column should retain grouping.
    """
    for masked in (False, True):
        t1 = Table(T1, masked=masked)

        # add row
        tg = t1.group_by('a')
        tg.add_row((0, 'a', 3.0, 4))
        assert np.all(tg.groups.indices == np.array([0, len(tg)]))
        assert tg.groups.keys is None

        # remove row
        tg = t1.group_by('a')
        tg.remove_row(4)
        assert np.all(tg.groups.indices == np.array([0, len(tg)]))
        assert tg.groups.keys is None

        # add column
        tg = t1.group_by('a')
        indices = tg.groups.indices.copy()
        tg.add_column(Column(name='e', data=np.arange(len(tg))))
        assert np.all(tg.groups.indices == indices)
        assert np.all(tg['e'].groups.indices == indices)
        assert np.all(tg['e'].groups.keys == tg.groups.keys)

        # remove column (not key column)
        tg = t1.group_by('a')
        tg.remove_column('b')
        assert np.all(tg.groups.indices == indices)
        # Still has original key col names
        assert tg.groups.keys.dtype.names == ('a',)
        assert np.all(tg['a'].groups.indices == indices)

        # remove key column
        tg = t1.group_by('a')
        tg.remove_column('a')
        assert np.all(tg.groups.indices == indices)
        assert tg.groups.keys.dtype.names == ('a',)
        assert np.all(tg['b'].groups.indices == indices)

        # rename key column
        tg = t1.group_by('a')
        tg.rename_column('a', 'aa')
        assert np.all(tg.groups.indices == indices)
        assert tg.groups.keys.dtype.names == ('a',)
        assert np.all(tg['aa'].groups.indices == indices)


def test_group_by_masked(T1):
    t1m = Table(T1, masked=True)
    t1m['c'].mask[4] = True
    t1m['d'].mask[5] = True
    assert t1m.group_by('a').pformat() == [' a   b   c   d ',
                                           '--- --- --- ---',
                                           '  0   a  --   4',
                                           '  1   b 3.0  --',
                                           '  1   a 2.0   6',
                                           '  1   a 1.0   7',
                                           '  2   c 7.0   0',
                                           '  2   b 5.0   1',
                                           '  2   b 6.0   2',
                                           '  2   a 4.0   3']


def test_group_by_errors(T1):
    """
    Appropriate errors get raised.
    """
    # Bad column name as string
    with pytest.raises(ValueError):
        T1.group_by('f')

    # Bad column names in list
    with pytest.raises(ValueError):
        T1.group_by(['f', 'g'])

    # Wrong length array
    with pytest.raises(ValueError):
        T1.group_by(np.array([1, 2]))

    # Wrong type
    with pytest.raises(TypeError):
        T1.group_by(None)

    # Masked key column
    t1 = Table(T1, masked=True)
    t1['a'].mask[4] = True
    with pytest.raises(ValueError):
        t1.group_by('a')


def test_groups_keys_meta(T1):
    """
    Make sure the keys meta['grouped_by_table_cols'] is working.
    """
    # Group by column in this table
    tg = T1.group_by('a')
    assert tg.groups.keys.meta['grouped_by_table_cols'] is True
    assert tg['c'].groups.keys.meta['grouped_by_table_cols'] is True
    assert tg.groups[1].groups.keys.meta['grouped_by_table_cols'] is True
    assert (tg['d'].groups[np.array([False, True, True])]
            .groups.keys.meta['grouped_by_table_cols'] is True)

    # Group by external Table
    tg = T1.group_by(T1['a', 'b'])
    assert tg.groups.keys.meta['grouped_by_table_cols'] is False
    assert tg['c'].groups.keys.meta['grouped_by_table_cols'] is False
    assert tg.groups[1].groups.keys.meta['grouped_by_table_cols'] is False

    # Group by external numpy array
    tg = T1.group_by(T1['a', 'b'].as_array())
    assert not hasattr(tg.groups.keys, 'meta')
    assert not hasattr(tg['c'].groups.keys, 'meta')

    # Group by Column
    tg = T1.group_by(T1['a'])
    assert 'grouped_by_table_cols' not in tg.groups.keys.meta
    assert 'grouped_by_table_cols' not in tg['c'].groups.keys.meta


def test_table_aggregate(T1):
    """
    Aggregate a table.
    """
    # Table with only summable cols
    t1 = T1['a', 'c', 'd']
    tg = t1.group_by('a')
    tga = tg.groups.aggregate(np.sum)
    assert tga.pformat() == [' a   c    d ',
                             '--- ---- ---',
                             '  0  0.0   4',
                             '  1  6.0  18',
                             '  2 22.0   6']

    # Reverts to default groups
    assert np.all(tga.groups.indices == np.array([0, 3]))
    assert tga.groups.keys is None

    # metadata survives
    assert tga.meta['ta'] == 1
    assert tga['c'].meta['a'] == 1
    assert tga['c'].description == 'column c'

    # Aggregate with np.sum with masked elements.  This results
    # in one group with no elements, hence a nan result and conversion
    # to float for the 'd' column.
    t1m = Table(t1, masked=True)
    t1m['c'].mask[4:6] = True
    t1m['d'].mask[4:6] = True
    tg = t1m.group_by('a')
    with catch_warnings(Warning) as warning_lines:
        tga = tg.groups.aggregate(np.sum)
        assert warning_lines[0].category == UserWarning
        assert "converting a masked element to nan" in str(warning_lines[0].message)

    assert tga.pformat() == [' a   c    d  ',
                             '--- ---- ----',
                             '  0  nan  nan',
                             '  1  3.0 13.0',
                             '  2 22.0  6.0']

    # Aggregate with np.sum with masked elements, but where every
    # group has at least one remaining (unmasked) element.  Then
    # the int column stays as an int.
    t1m = Table(t1, masked=True)
    t1m['c'].mask[5] = True
    t1m['d'].mask[5] = True
    tg = t1m.group_by('a')
    tga = tg.groups.aggregate(np.sum)
    assert tga.pformat() == [' a   c    d ',
                             '--- ---- ---',
                             '  0  0.0   4',
                             '  1  3.0  13',
                             '  2 22.0   6']

    # Aggregate with a column type that cannot be supplied to the aggregating
    # function.  This raises a warning but still works.
    tg = T1.group_by('a')
    with catch_warnings(Warning) as warning_lines:
        tga = tg.groups.aggregate(np.sum)
        assert warning_lines[0].category == AstropyUserWarning
        assert "Cannot aggregate column" in str(warning_lines[0].message)
    assert tga.pformat() == [' a   c    d ',
                             '--- ---- ---',
                             '  0  0.0   4',
                             '  1  6.0  18',
                             '  2 22.0   6']


def test_table_aggregate_reduceat(T1):
    """
    Aggregate a table with functions which have a reduceat method.
    """
    # Comparison functions without reduceat
    def np_mean(x):
        return np.mean(x)

    def np_sum(x):
        return np.sum(x)

    def np_add(x):
        return np.add(x)

    # Table with only summable cols
    t1 = T1['a', 'c', 'd']
    tg = t1.group_by('a')
    # Comparison
    tga_r = tg.groups.aggregate(np.sum)
    tga_a = tg.groups.aggregate(np.add)
    tga_n = tg.groups.aggregate(np_sum)

    assert np.all(tga_r == tga_n)
    assert np.all(tga_a == tga_n)
    assert tga_n.pformat() == [' a   c    d ',
                               '--- ---- ---',
                               '  0  0.0   4',
                               '  1  6.0  18',
                               '  2 22.0   6']

    tga_r = tg.groups.aggregate(np.mean)
    tga_n = tg.groups.aggregate(np_mean)
    assert np.all(tga_r == tga_n)
    assert tga_n.pformat() == [' a   c   d ',
                               '--- --- ---',
                               '  0 0.0 4.0',
                               '  1 2.0 6.0',
                               '  2 5.5 1.5']

    # Binary ufunc np_add should raise warning without reduceat
    t2 = T1['a', 'c']
    tg = t2.group_by('a')

    with catch_warnings(Warning) as warning_lines:
        tga = tg.groups.aggregate(np_add)
        assert warning_lines[0].category == AstropyUserWarning
        assert "Cannot aggregate column" in str(warning_lines[0].message)
    assert tga.pformat() == [' a ',
                             '---',
                             '  0',
                             '  1',
                             '  2']


def test_column_aggregate(T1):
    """
    Aggregate a single table column.
    """
    for masked in (False, True):
        tg = Table(T1, masked=masked).group_by('a')
        tga = tg['c'].groups.aggregate(np.sum)
        assert tga.pformat() == [' c  ',
                                 '----',
                                 ' 0.0',
                                 ' 6.0',
                                 '22.0']


def test_table_filter():
    """
    Table groups filtering.
    """
    def all_positive(table, key_colnames):
        colnames = [name for name in table.colnames
                    if name not in key_colnames]
        for colname in colnames:
            if np.any(table[colname] < 0):
                return False
        return True

    # Negative value in 'a' column should not filter because it is a key col
    t = Table.read([' a c d',
                    ' -2 7.0 0',
                    ' -2 5.0 1',
                    ' 0 0.0 4',
                    ' 1 3.0 5',
                    ' 1 2.0 -6',
                    ' 1 1.0 7',
                    ' 3 3.0 5',
                    ' 3 -2.0 6',
                    ' 3 1.0 7',
                    ], format='ascii')
    tg = t.group_by('a')
    t2 = tg.groups.filter(all_positive)
    assert t2.groups[0].pformat() == [' a   c   d ',
                                      '--- --- ---',
                                      ' -2 7.0   0',
                                      ' -2 5.0   1']
    assert t2.groups[1].pformat() == [' a   c   d ',
                                      '--- --- ---',
                                      '  0 0.0   4']


def test_column_filter():
    """
    Column groups filtering.
    """
    def all_positive(column):
        if np.any(column < 0):
            return False
        return True

    # Negative value in 'a' column should not filter because it is a key col
    t = Table.read([' a c d',
                    ' -2 7.0 0',
                    ' -2 5.0 1',
                    ' 0 0.0 4',
                    ' 1 3.0 5',
                    ' 1 2.0 -6',
                    ' 1 1.0 7',
                    ' 3 3.0 5',
                    ' 3 -2.0 6',
                    ' 3 1.0 7',
                    ], format='ascii')
    tg = t.group_by('a')
    c2 = tg['c'].groups.filter(all_positive)
    assert len(c2.groups) == 3
    assert c2.groups[0].pformat() == [' c ', '---', '7.0', '5.0']
    assert c2.groups[1].pformat() == [' c ', '---', '0.0']
    assert c2.groups[2].pformat() == [' c ', '---', '3.0', '2.0', '1.0']
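
# ---------------------------------------------------------------------------
# Illustrative sketch, not a collected test: the minimal group-by/aggregate
# workflow that the tests above exercise.  The column names are arbitrary.
def _demo_group_aggregate():
    t = Table({'key': [1, 2, 1, 2], 'val': [1.0, 2.0, 3.0, 4.0]})
    tg = t.group_by('key')  # sorted by 'key'; boundaries in tg.groups.indices
    # One output row per key: val sums to 4.0 (key=1) and 6.0 (key=2)
    return tg.groups.aggregate(np.sum)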
260d43fdc52f0bfdc2a116ee5c17f588827e8cac63934c473188123ecee65a63
# Licensed under a 3-clause BSD style license - see LICENSE.rst

"""Test behavior related to masked tables"""

import pytest
import numpy as np
import numpy.ma as ma

from ...table import Column, MaskedColumn, Table


class SetupData:
    def setup_method(self, method):
        self.a = MaskedColumn(name='a', data=[1, 2, 3], fill_value=1)
        self.b = MaskedColumn(name='b', data=[4, 5, 6], mask=True)
        self.c = MaskedColumn(name='c', data=[7, 8, 9], mask=False)
        self.d_mask = np.array([False, True, False])
        self.d = MaskedColumn(name='d', data=[7, 8, 7], mask=self.d_mask)
        self.t = Table([self.a, self.b], masked=True)
        self.ca = Column(name='ca', data=[1, 2, 3])


class TestPprint(SetupData):
    def test_pformat(self):
        assert self.t.pformat() == [' a   b ',
                                    '--- ---',
                                    '  1  --',
                                    '  2  --',
                                    '  3  --']


class TestFilled:
    """Test the filled method in MaskedColumn and Table"""

    def setup_method(self, method):
        mask = [True, False, False]
        self.meta = {'a': 1, 'b': [2, 3]}
        a = self.a = MaskedColumn(name='a', data=[1, 2, 3], fill_value=10,
                                  mask=mask, meta={'a': 1})
        b = self.b = MaskedColumn(name='b', data=[4.0, 5.0, 6.0],
                                  fill_value=10.0, mask=mask)
        c = self.c = MaskedColumn(name='c', data=['7', '8', '9'],
                                  fill_value='1', mask=mask)

    def test_filled_column(self):
        f = self.a.filled()
        assert np.all(f == [10, 2, 3])
        assert isinstance(f, Column)
        assert not isinstance(f, MaskedColumn)

        # Confirm copy, not ref
        assert f.meta['a'] == 1
        f.meta['a'] = 2
        f[1] = 100
        assert self.a[1] == 2
        assert self.a.meta['a'] == 1

        # Fill with arg fill_value not column fill_value
        f = self.a.filled(20)
        assert np.all(f == [20, 2, 3])

        f = self.b.filled()
        assert np.all(f == [10.0, 5.0, 6.0])
        assert isinstance(f, Column)

        f = self.c.filled()
        assert np.all(f == ['1', '8', '9'])
        assert isinstance(f, Column)

    def test_filled_masked_table(self, tableclass):
        t = tableclass([self.a, self.b, self.c], meta=self.meta)

        f = t.filled()
        assert isinstance(f, Table)
        assert f.masked is False
        assert np.all(f['a'] == [10, 2, 3])
        assert np.allclose(f['b'], [10.0, 5.0, 6.0])
        assert np.all(f['c'] == ['1', '8', '9'])

        # Confirm copy, not ref
        assert f.meta['b'] == [2, 3]
        f.meta['b'][0] = 20
        assert t.meta['b'] == [2, 3]
        f['a'][2] = 100
        assert t['a'][2] == 3

    def test_filled_unmasked_table(self, tableclass):
        t = tableclass([(1, 2), ('3', '4')], names=('a', 'b'), meta=self.meta)

        f = t.filled()
        assert isinstance(f, Table)
        assert f.masked is False
        assert np.all(f['a'] == t['a'])
        assert np.all(f['b'] == t['b'])

        # Confirm copy, not ref
        assert f.meta['b'] == [2, 3]
        f.meta['b'][0] = 20
        assert t.meta['b'] == [2, 3]
        f['a'][1] = 100
        assert t['a'][1] == 2


class TestFillValue(SetupData):
    """Test setting and getting fill value in MaskedColumn and Table"""

    def test_init_set_fill_value(self):
        """Check that setting fill_value in the MaskedColumn init works"""
        assert self.a.fill_value == 1
        c = MaskedColumn(name='c', data=['xxxx', 'yyyy'], fill_value='none')
        assert c.fill_value == 'none'

    def test_set_get_fill_value_for_bare_column(self):
        """Check that set and get of fill value works for a bare MaskedColumn"""
        self.d.fill_value = -999
        assert self.d.fill_value == -999
        assert np.all(self.d.filled() == [7, -999, 7])

    def test_set_get_fill_value_for_str_column(self):
        c = MaskedColumn(name='c', data=['xxxx', 'yyyy'], mask=[True, False])
        # assert np.all(c.filled() == ['N/A', 'yyyy'])
        c.fill_value = 'ABCDEF'
        assert c.fill_value == 'ABCD'  # string truncated to dtype length
        assert np.all(c.filled() == ['ABCD', 'yyyy'])
        assert np.all(c.filled('XY') == ['XY', 'yyyy'])

    def test_table_column_mask_not_ref(self):
        """Table column mask is not a reference to the original column mask"""
        self.b.fill_value = -999
        assert self.t['b'].fill_value != -999

    def test_set_get_fill_value_for_table_column(self):
        """Check that set and get of fill value works for a Column in a Table"""
        self.t['b'].fill_value = 1
        assert self.t['b'].fill_value == 1
        assert np.all(self.t['b'].filled() == [1, 1, 1])

    def test_data_attribute_fill_and_mask(self):
        """Check that the .data attribute preserves fill_value and mask"""
        self.t['b'].fill_value = 1
        self.t['b'].mask = [True, False, True]
        assert self.t['b'].data.fill_value == 1
        assert np.all(self.t['b'].data.mask == [True, False, True])


class TestMaskedColumnInit(SetupData):
    """Initialization of a masked column"""

    def test_set_mask_and_not_ref(self):
        """Check that mask gets set properly and that it is a copy, not ref"""
        assert np.all(~self.a.mask)
        assert np.all(self.b.mask)
        assert np.all(~self.c.mask)
        assert np.all(self.d.mask == self.d_mask)
        self.d.mask[0] = True
        assert not np.all(self.d.mask == self.d_mask)

    def test_set_mask_from_list(self):
        """Set mask from a list"""
        mask_list = [False, True, False]
        a = MaskedColumn(name='a', data=[1, 2, 3], mask=mask_list)
        assert np.all(a.mask == mask_list)

    def test_override_existing_mask(self):
        """Override existing mask values"""
        mask_list = [False, True, False]
        b = MaskedColumn(name='b', data=self.b, mask=mask_list)
        assert np.all(b.mask == mask_list)

    def test_incomplete_mask_spec(self):
        """Incomplete mask specification raises MaskError"""
        mask_list = [False, True]
        with pytest.raises(ma.MaskError):
            MaskedColumn(name='b', length=4, mask=mask_list)


class TestTableInit(SetupData):
    """Initializing a table"""

    def test_mask_true_if_any_input_masked(self):
        """Masking is True if any input is masked"""
        t = Table([self.ca, self.a])
        assert t.masked is True
        t = Table([self.ca])
        assert t.masked is False
        t = Table([self.ca, ma.array([1, 2, 3])])
        assert t.masked is True

    def test_mask_false_if_no_input_masked(self):
        """Masking is not enabled unless it is requested or an input requires a mask"""
        t0 = Table([[3, 4]], masked=False)
        t1 = Table(t0, masked=True)
        t2 = Table(t1, masked=False)
        assert not t0.masked
        assert t1.masked
        assert not t2.masked

    def test_mask_property(self):
        t = self.t
        # Access table mask (boolean structured array) by column name
        assert np.all(t.mask['a'] == np.array([False, False, False]))
        assert np.all(t.mask['b'] == np.array([True, True, True]))
        # Check that setting mask from table mask has the desired effect on column
        t.mask['b'] = np.array([False, True, False])
        assert np.all(t['b'].mask == np.array([False, True, False]))
        # Non-masked table returns None for mask attribute
        t2 = Table([self.ca], masked=False)
        assert t2.mask is None
        # Set mask property globally and verify local correctness
        for mask in (True, False):
            t.mask = mask
            for name in ('a', 'b'):
                assert np.all(t[name].mask == mask)


class TestAddColumn:

    def test_add_masked_column_to_masked_table(self):
        t = Table(masked=True)
        assert t.masked
        t.add_column(MaskedColumn(name='a', data=[1, 2, 3], mask=[0, 1, 0]))
        assert t.masked
        t.add_column(MaskedColumn(name='b', data=[4, 5, 6], mask=[1, 0, 1]))
        assert t.masked
        assert np.all(t['a'] == np.array([1, 2, 3]))
        assert np.all(t['a'].mask == np.array([0, 1, 0], bool))
        assert np.all(t['b'] == np.array([4, 5, 6]))
        assert np.all(t['b'].mask == np.array([1, 0, 1], bool))

    def test_add_masked_column_to_non_masked_table(self):
        t = Table(masked=False)
        assert not t.masked
        t.add_column(Column(name='a', data=[1, 2, 3]))
        assert not t.masked
        t.add_column(MaskedColumn(name='b', data=[4, 5, 6], mask=[1, 0, 1]))
        assert t.masked
        assert np.all(t['a'] == np.array([1, 2, 3]))
        assert np.all(t['a'].mask == np.array([0, 0, 0], bool))
        assert np.all(t['b'] == np.array([4, 5, 6]))
        assert np.all(t['b'].mask == np.array([1, 0, 1], bool))

    def test_add_non_masked_column_to_masked_table(self):
        t = Table(masked=True)
        assert t.masked
        t.add_column(Column(name='a', data=[1, 2, 3]))
        assert t.masked
        t.add_column(MaskedColumn(name='b', data=[4, 5, 6], mask=[1, 0, 1]))
        assert t.masked
        assert np.all(t['a'] == np.array([1, 2, 3]))
        assert np.all(t['a'].mask == np.array([0, 0, 0], bool))
        assert np.all(t['b'] == np.array([4, 5, 6]))
        assert np.all(t['b'].mask == np.array([1, 0, 1], bool))

    def test_convert_to_masked_table_only_if_necessary(self):
        # Do not convert to masked table, if new column has no masked value.
        # See #1185 for details.
        t = Table(masked=False)
        assert not t.masked
        t.add_column(Column(name='a', data=[1, 2, 3]))
        assert not t.masked
        t.add_column(MaskedColumn(name='b', data=[4, 5, 6], mask=[0, 0, 0]))
        assert not t.masked
        assert np.all(t['a'] == np.array([1, 2, 3]))
        assert np.all(t['b'] == np.array([4, 5, 6]))


class TestRenameColumn:

    def test_rename_masked_column(self):
        t = Table(masked=True)
        t.add_column(MaskedColumn(name='a', data=[1, 2, 3], mask=[0, 1, 0]))
        t['a'].fill_value = 42
        t.rename_column('a', 'b')
        assert t.masked
        assert np.all(t['b'] == np.array([1, 2, 3]))
        assert np.all(t['b'].mask == np.array([0, 1, 0], bool))
        assert t['b'].fill_value == 42
        assert t.colnames == ['b']


class TestRemoveColumn:

    def test_remove_masked_column(self):
        t = Table(masked=True)
        t.add_column(MaskedColumn(name='a', data=[1, 2, 3], mask=[0, 1, 0]))
        t['a'].fill_value = 42
        t.add_column(MaskedColumn(name='b', data=[4, 5, 6], mask=[1, 0, 1]))
        t.remove_column('b')
        assert t.masked
        assert np.all(t['a'] == np.array([1, 2, 3]))
        assert np.all(t['a'].mask == np.array([0, 1, 0], bool))
        assert t['a'].fill_value == 42
        assert t.colnames == ['a']


class TestAddRow:

    def test_add_masked_row_to_masked_table_iterable(self):
        t = Table(masked=True)
        t.add_column(MaskedColumn(name='a', data=[1], mask=[0]))
        t.add_column(MaskedColumn(name='b', data=[4], mask=[1]))
        t.add_row([2, 5], mask=[1, 0])
        t.add_row([3, 6], mask=[0, 1])
        assert t.masked
        assert np.all(np.array(t['a']) == np.array([1, 2, 3]))
        assert np.all(t['a'].mask == np.array([0, 1, 0], bool))
        assert np.all(np.array(t['b']) == np.array([4, 5, 6]))
        assert np.all(t['b'].mask == np.array([1, 0, 1], bool))

    def test_add_masked_row_to_masked_table_mapping1(self):
        t = Table(masked=True)
        t.add_column(MaskedColumn(name='a', data=[1], mask=[0]))
        t.add_column(MaskedColumn(name='b', data=[4], mask=[1]))
        t.add_row({'b': 5, 'a': 2}, mask={'a': 1, 'b': 0})
        t.add_row({'a': 3, 'b': 6}, mask={'b': 1, 'a': 0})
        assert t.masked
        assert np.all(np.array(t['a']) == np.array([1, 2, 3]))
        assert np.all(t['a'].mask == np.array([0, 1, 0], bool))
        assert np.all(np.array(t['b']) == np.array([4, 5, 6]))
        assert np.all(t['b'].mask == np.array([1, 0, 1], bool))

    def test_add_masked_row_to_masked_table_mapping2(self):
        # When adding values to a masked table, if the mask is specified as a
        # dict, then values not specified will have mask values set to True
        t = Table(masked=True)
        t.add_column(MaskedColumn(name='a', data=[1], mask=[0]))
        t.add_column(MaskedColumn(name='b', data=[4], mask=[1]))
        t.add_row({'b': 5}, mask={'b': 0})
        t.add_row({'a': 3}, mask={'a': 0})
        assert t.masked
        assert t['a'][0] == 1 and t['a'][2] == 3
        assert np.all(t['a'].mask == np.array([0, 1, 0], bool))
        assert t['b'][1] == 5
        assert np.all(t['b'].mask == np.array([1, 0, 1], bool))

    def test_add_masked_row_to_masked_table_mapping3(self):
        # When adding values to a masked table, if mask is not passed to
        # add_row, then the mask should be set to False if values are present
        # and True if not.
        t = Table(masked=True)
        t.add_column(MaskedColumn(name='a', data=[1], mask=[0]))
        t.add_column(MaskedColumn(name='b', data=[4], mask=[1]))
        t.add_row({'b': 5})
        t.add_row({'a': 3})
        assert t.masked
        assert t['a'][0] == 1 and t['a'][2] == 3
        assert np.all(t['a'].mask == np.array([0, 1, 0], bool))
        assert t['b'][1] == 5
        assert np.all(t['b'].mask == np.array([1, 0, 1], bool))

    def test_add_masked_row_to_masked_table_mapping4(self):
        # When adding values to a masked table, if the mask is specified as a
        # dict, then keys in values should match keys in mask
        t = Table(masked=True)
        t.add_column(MaskedColumn(name='a', data=[1], mask=[0]))
        t.add_column(MaskedColumn(name='b', data=[4], mask=[1]))
        with pytest.raises(ValueError) as exc:
            t.add_row({'b': 5}, mask={'a': True})
        assert exc.value.args[0] == 'keys in mask should match keys in vals'

    def test_add_masked_row_to_masked_table_mismatch(self):
        t = Table(masked=True)
        t.add_column(MaskedColumn(name='a', data=[1], mask=[0]))
        t.add_column(MaskedColumn(name='b', data=[4], mask=[1]))
        with pytest.raises(TypeError) as exc:
            t.add_row([2, 5], mask={'a': 1, 'b': 0})
        assert exc.value.args[0] == "Mismatch between type of vals and mask"
        with pytest.raises(TypeError) as exc:
            t.add_row({'b': 5, 'a': 2}, mask=[1, 0])
        assert exc.value.args[0] == "Mismatch between type of vals and mask"

    def test_add_masked_row_to_non_masked_table_iterable(self):
        t = Table(masked=False)
        t.add_column(Column(name='a', data=[1]))
        t.add_column(Column(name='b', data=[4]))
        assert not t.masked
        t.add_row([2, 5])
        assert not t.masked
        t.add_row([3, 6], mask=[0, 1])
        assert t.masked
        assert np.all(np.array(t['a']) == np.array([1, 2, 3]))
        assert np.all(t['a'].mask == np.array([0, 0, 0], bool))
        assert np.all(np.array(t['b']) == np.array([4, 5, 6]))
        assert np.all(t['b'].mask == np.array([0, 0, 1], bool))


def test_setting_from_masked_column():
    """Test issue in #2997"""
    mask_b = np.array([True, True, False, False])
    for select in (mask_b, slice(0, 2)):
        t = Table(masked=True)
        t['a'] = Column([1, 2, 3, 4])
        t['b'] = MaskedColumn([11, 22, 33, 44], mask=mask_b)
        t['c'] = MaskedColumn([111, 222, 333, 444],
                              mask=[True, False, True, False])

        t['b'][select] = t['c'][select]
        assert t['b'][1] == t[1]['b']
        assert t['b'][0] is np.ma.masked  # Original state since t['c'][0] is masked
        assert t['b'][1] == 222  # New from t['c'] since t['c'][1] is unmasked
        assert t['b'][2] == 33
        assert t['b'][3] == 44
        assert np.all(t['b'].mask == t.mask['b'])  # Avoid t.mask in general, this is for testing

        mask_before_add = t.mask.copy()
        t['d'] = np.arange(len(t))
        assert np.all(t.mask['b'] == mask_before_add['b'])


def test_coercing_fill_value_type():
    """
    Test that masked column fill_value is coerced into the correct column type.
    """
    # This is the original example posted on the astropy@scipy mailing list
    t = Table({'a': ['1']}, masked=True)
    t['a'].set_fill_value('0')
    t2 = Table(t, names=['a'], dtype=[np.int32])
    assert isinstance(t2['a'].fill_value, np.int32)

    # Unit test the same thing.
    c = MaskedColumn(['1'])
    c.set_fill_value('0')
    c2 = MaskedColumn(c, dtype=np.int32)
    assert isinstance(c2.fill_value, np.int32)
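
# ---------------------------------------------------------------------------
# Illustrative sketch, not a collected test: fill_value coercion in
# isolation, mirroring test_coercing_fill_value_type above.
def _demo_fill_value_coercion():
    c = MaskedColumn(['1'], mask=[True])
    c.set_fill_value('0')
    c2 = MaskedColumn(c, dtype=np.int32)  # fill_value is coerced to np.int32
    return c2.fill_value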
882dbb46fd90e43c59e227c530440cb1421cef6aec69b92052e0776100f524d8
# Licensed under a 3-clause BSD style license - see LICENSE.rst

"""
Verify item access API in:
https://github.com/astropy/astropy/wiki/Table-item-access-definition
"""

import pytest
import numpy as np


@pytest.mark.usefixtures('table_data')
class BaseTestItems():
    pass


@pytest.mark.usefixtures('table_data')
class TestTableColumnsItems(BaseTestItems):

    def test_by_name(self, table_data):
        """Access TableColumns by name and show that item access returns
        a Column that refers to underlying table data"""
        self.t = table_data.Table(table_data.COLS)
        self.tc = self.t.columns

        assert self.tc['a'].name == 'a'
        assert self.tc['a'][1] == 2
        assert self.tc['a'].description == 'da'
        assert self.tc['a'].format == '%i'
        assert self.tc['a'].meta == {'ma': 1}
        assert self.tc['a'].unit == 'ua'
        assert self.tc['a'].attrs_equal(table_data.COLS[0])
        assert isinstance(self.tc['a'], table_data.Column)

        self.tc['b'][1] = 0
        assert self.t['b'][1] == 0

    def test_by_position(self, table_data):
        """Access TableColumns by position and show that item access returns
        a Column that refers to underlying table data"""
        self.t = table_data.Table(table_data.COLS)
        self.tc = self.t.columns

        assert self.tc[1].name == 'b'
        assert np.all(self.tc[1].data == table_data.COLS[1].data)
        assert self.tc[1].description == 'db'
        assert self.tc[1].format == '%d'
        assert self.tc[1].meta == {'mb': 1}
        assert self.tc[1].unit == 'ub'
        assert self.tc[1].attrs_equal(table_data.COLS[1])
        assert isinstance(self.tc[1], table_data.Column)

        assert self.tc[2].unit == 'ub'

        self.tc[1][1] = 0
        assert self.t['b'][1] == 0

    def test_mult_columns(self, table_data):
        """Access TableColumns with "fancy indexing" and show that the
        returned TableColumns object still references the original data"""
        self.t = table_data.Table(table_data.COLS)
        self.tc = self.t.columns

        tc2 = self.tc['b', 'c']
        assert tc2[1].name == 'c'
        assert tc2[1][1] == 8
        assert tc2[0].name == 'b'
        assert tc2[0][1] == 5

        tc2['c'][1] = 0
        assert self.tc['c'][1] == 0
        assert self.t['c'][1] == 0

    def test_column_slice(self, table_data):
        """Access TableColumns with a slice and show that the returned
        TableColumns object still references the original data"""
        self.t = table_data.Table(table_data.COLS)
        self.tc = self.t.columns

        tc2 = self.tc[1:3]
        assert tc2[1].name == 'c'
        assert tc2[1][1] == 8
        assert tc2[0].name == 'b'
        assert tc2[0][1] == 5

        tc2['c'][1] = 0
        assert self.tc['c'][1] == 0
        assert self.t['c'][1] == 0


@pytest.mark.usefixtures('table_data')
class TestTableItems(BaseTestItems):

    @pytest.mark.parametrize("idx", [1, np.int64(1), np.array(1)])
    def test_column(self, table_data, idx):
        """Column access returns REFERENCE to data"""
        self.t = table_data.Table(table_data.COLS)
        self.tc = self.t.columns

        a = self.t['a']
        assert a[idx] == 2
        a[idx] = 0
        assert self.t['a'][idx] == 0

    @pytest.mark.parametrize("idx", [1, np.int64(1), np.array(1)])
    def test_row(self, table_data, idx):
        """Row access returns REFERENCE to data"""
        self.t = table_data.Table(table_data.COLS)
        self.tc = self.t.columns

        row = self.t[idx]
        assert row['a'] == 2
        assert row[idx] == 5
        assert row.columns['a'].attrs_equal(table_data.COLS[0])
        assert row.columns['b'].attrs_equal(table_data.COLS[1])
        assert row.columns['c'].attrs_equal(table_data.COLS[2])

        # Check that setting by col index sets the table and row value
        row[idx] = 0
        assert row[idx] == 0
        assert row['b'] == 0
        assert self.t['b'][idx] == 0
        assert self.t[idx]['b'] == 0

        # Check that setting by col name sets the table and row value
        row['a'] = 0
        assert row[0] == 0
        assert row['a'] == 0
        assert self.t['a'][1] == 0
        assert self.t[1]['a'] == 0

    def test_empty_iterable_item(self, table_data):
        """
        Table item access with [], (), or np.array([]) returns the same table
        with no rows.
        """
        self.t = table_data.Table(table_data.COLS)
        for item in [], (), np.array([]):
            t2 = self.t[item]
            assert not t2
            assert len(t2) == 0
            assert t2['a'].attrs_equal(table_data.COLS[0])
            assert t2['b'].attrs_equal(table_data.COLS[1])
            assert t2['c'].attrs_equal(table_data.COLS[2])

    def test_table_slice(self, table_data):
        """Table slice returns REFERENCE to data"""
        self.t = table_data.Table(table_data.COLS)
        self.tc = self.t.columns

        t2 = self.t[1:3]
        assert np.all(t2['a'] == table_data.DATA['a'][1:3])
        assert t2['a'].attrs_equal(table_data.COLS[0])
        assert t2['b'].attrs_equal(table_data.COLS[1])
        assert t2['c'].attrs_equal(table_data.COLS[2])
        t2['a'][0] = 0
        assert np.all(self.t['a'] == np.array([1, 0, 3]))
        assert t2.masked == self.t.masked
        assert t2._column_class == self.t._column_class
        assert isinstance(t2, table_data.Table)

    def test_fancy_index_slice(self, table_data):
        """Table fancy slice returns COPY of data"""
        self.t = table_data.Table(table_data.COLS)
        self.tc = self.t.columns

        slice = np.array([0, 2])
        t2 = self.t[slice]
        assert np.all(t2['a'] == table_data.DATA['a'][slice])
        assert t2['a'].attrs_equal(table_data.COLS[0])
        assert t2['b'].attrs_equal(table_data.COLS[1])
        assert t2['c'].attrs_equal(table_data.COLS[2])
        t2['a'][0] = 0

        assert np.all(self.t.as_array() == table_data.DATA)
        assert np.any(t2['a'] != table_data.DATA['a'][slice])
        assert t2.masked == self.t.masked
        assert t2._column_class == self.t._column_class
        assert isinstance(t2, table_data.Table)

    def test_list_index_slice(self, table_data):
        """Table list index slice returns COPY of data"""
        self.t = table_data.Table(table_data.COLS)
        self.tc = self.t.columns

        slice = [0, 2]
        t2 = self.t[slice]
        assert np.all(t2['a'] == table_data.DATA['a'][slice])
        assert t2['a'].attrs_equal(table_data.COLS[0])
        assert t2['b'].attrs_equal(table_data.COLS[1])
        assert t2['c'].attrs_equal(table_data.COLS[2])
        t2['a'][0] = 0

        assert np.all(self.t.as_array() == table_data.DATA)
        assert np.any(t2['a'] != table_data.DATA['a'][slice])
        assert t2.masked == self.t.masked
        assert t2._column_class == self.t._column_class
        assert isinstance(t2, table_data.Table)

    def test_select_columns(self, table_data):
        """Select columns returns COPY of data and all column attributes"""
        self.t = table_data.Table(table_data.COLS)
        self.tc = self.t.columns

        # try both lists and tuples
        for columns in (('a', 'c'), ['a', 'c']):
            t2 = self.t[columns]
            assert np.all(t2['a'] == table_data.DATA['a'])
            assert np.all(t2['c'] == table_data.DATA['c'])
            assert t2['a'].attrs_equal(table_data.COLS[0])
            assert t2['c'].attrs_equal(table_data.COLS[2])
            t2['a'][0] = 0
            assert np.all(self.t.as_array() == table_data.DATA)
            assert np.any(t2['a'] != table_data.DATA['a'])
            assert t2.masked == self.t.masked
            assert t2._column_class == self.t._column_class

    def test_select_columns_fail(self, table_data):
        """Selecting a column that doesn't exist fails"""
        self.t = table_data.Table(table_data.COLS)

        with pytest.raises(KeyError) as err:
            self.t[['xxxx']]
        assert "KeyError: 'xxxx'" in str(err)

        with pytest.raises(KeyError) as err:
            self.t[['xxxx', 'yyyy']]
        assert "KeyError: 'xxxx'" in str(err)

    def test_np_where(self, table_data):
        """Select rows using output of np.where"""
        t = table_data.Table(table_data.COLS)
        # Select last two rows
        rows = np.where(t['a'] > 1.5)
        t2 = t[rows]
        assert np.all(t2['a'] == [2, 3])
        assert np.all(t2['b'] == [5, 6])
        assert isinstance(t2, table_data.Table)

        # Select no rows
        rows = np.where(t['a'] > 100)
        t2 = t[rows]
        assert len(t2) == 0
        assert isinstance(t2, table_data.Table)

    def test_np_integers(self, table_data):
        """
        Select rows using numpy integers.  This is a regression test for a
        py 3.3 failure mode.
        """
        t = table_data.Table(table_data.COLS)
        idxs = np.random.randint(len(t), size=2)
        item = t[idxs[1]]

    def test_select_bad_column(self, table_data):
        """Select column name that does not exist"""
        self.t = table_data.Table(table_data.COLS)
        self.tc = self.t.columns

        with pytest.raises(ValueError):
            self.t['a', 1]
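
# ---------------------------------------------------------------------------
# Illustrative sketch, not a collected test: the reference-vs-copy contract
# verified above.  Regular slices share memory with the parent table, while
# fancy indexing copies it.
def _demo_slice_semantics(t):
    view = t[1:3]               # regular slice: REFERENCE to t's data
    copy = t[np.array([0, 2])]  # fancy index: COPY of t's data
    return view, copy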
b0176f266ebcc0eb9b3d97a94f0f1ddfe44c69ab56ff6e111877b401ef4733a7
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst

from ... import table
from .. import pprint


class MyRow(table.Row):
    def __str__(self):
        return str(self.as_void())


class MyColumn(table.Column):
    pass


class MyMaskedColumn(table.MaskedColumn):
    pass


class MyTableColumns(table.TableColumns):
    pass


class MyTableFormatter(pprint.TableFormatter):
    pass


class MyTable(table.Table):
    Row = MyRow
    Column = MyColumn
    MaskedColumn = MyMaskedColumn
    TableColumns = MyTableColumns
    TableFormatter = MyTableFormatter


def test_simple_subclass():
    t = MyTable([[1, 2], [3, 4]])
    row = t[0]
    assert isinstance(row, MyRow)
    assert isinstance(t['col0'], MyColumn)
    assert isinstance(t.columns, MyTableColumns)
    assert isinstance(t.formatter, MyTableFormatter)

    t2 = MyTable(t)
    row = t2[0]
    assert isinstance(row, MyRow)
    assert str(row) == '(1, 3)'

    t3 = table.Table(t)
    row = t3[0]
    assert not isinstance(row, MyRow)
    assert str(row) != '(1, 3)'

    t = MyTable([[1, 2], [3, 4]], masked=True)
    row = t[0]
    assert isinstance(row, MyRow)
    assert str(row) == '(1, 3)'
    assert isinstance(t['col0'], MyMaskedColumn)
    assert isinstance(t.formatter, MyTableFormatter)


class ParamsRow(table.Row):
    """
    Row class that allows access to an arbitrary dict of parameters
    stored as a dict object in the ``params`` column.
    """

    def __getitem__(self, item):
        if item not in self.colnames:
            return super().__getitem__('params')[item]
        else:
            return super().__getitem__(item)

    def keys(self):
        out = [name for name in self.colnames if name != 'params']
        params = [key.lower() for key in sorted(self['params'])]
        return out + params

    def values(self):
        return [self[key] for key in self.keys()]


class ParamsTable(table.Table):
    Row = ParamsRow


def test_params_table():
    t = ParamsTable(names=['a', 'b', 'params'], dtype=['i', 'f', 'O'])
    t.add_row((1, 2.0, {'x': 1.5, 'y': 2.5}))
    t.add_row((2, 3.0, {'z': 'hello', 'id': 123123}))
    assert t['params'][0] == {'x': 1.5, 'y': 2.5}
    assert t[0]['params'] == {'x': 1.5, 'y': 2.5}
    assert t[0]['y'] == 2.5
    assert t[1]['id'] == 123123
    assert list(t[1].keys()) == ['a', 'b', 'id', 'z']
    assert list(t[1].values()) == [2, 3.0, 123123, 'hello']
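
# ---------------------------------------------------------------------------
# Illustrative sketch, not a collected test: declaring the class-level hooks
# (Row, Column, ...) as on MyTable above is all that is needed for Table to
# hand back the custom classes on item access.
def _demo_subclass_hooks():
    t = MyTable([[1, 2], [3, 4]])
    return isinstance(t[0], MyRow) and isinstance(t['col0'], MyColumn)  # True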
9fa59ae5500ae24d37a3fe3aa9525c0fcdf079adabf821cb3a9b3c6f890ec73c
import numpy as np
import pickle

from ...table import Table, Column, MaskedColumn, QTable
from ...table.table_helpers import simple_table
from ...units import Quantity, deg
from ...time import Time
from ...coordinates import SkyCoord


def test_pickle_column(protocol):
    c = Column(data=[1, 2], name='a', format='%05d', description='col a',
               unit='cm', meta={'a': 1})
    cs = pickle.dumps(c, protocol=protocol)
    cp = pickle.loads(cs)
    assert np.all(cp == c)
    assert cp.attrs_equal(c)
    assert cp._parent_table is None
    assert repr(c) == repr(cp)


def test_pickle_masked_column(protocol):
    c = MaskedColumn(data=[1, 2], name='a', format='%05d', description='col a',
                     unit='cm', meta={'a': 1})
    c.mask[1] = True
    c.fill_value = -99

    cs = pickle.dumps(c, protocol=protocol)
    cp = pickle.loads(cs)

    assert np.all(cp._data == c._data)
    assert np.all(cp.mask == c.mask)
    assert cp.attrs_equal(c)
    assert cp.fill_value == -99
    assert cp._parent_table is None
    assert repr(c) == repr(cp)


def test_pickle_multidimensional_column(protocol):
    """Regression test for https://github.com/astropy/astropy/issues/4098"""
    a = np.zeros((3, 2))
    c = Column(a, name='a')
    cs = pickle.dumps(c, protocol=protocol)
    cp = pickle.loads(cs)

    assert np.all(c == cp)
    assert c.shape == cp.shape
    assert cp.attrs_equal(c)
    assert repr(c) == repr(cp)


def test_pickle_table(protocol):
    a = Column(data=[1, 2], name='a', format='%05d', description='col a',
               unit='cm', meta={'a': 1})
    b = Column(data=[3.0, 4.0], name='b', format='%05d', description='col b',
               unit='cm', meta={'b': 1})

    for table_class in Table, QTable:
        t = table_class([a, b], meta={'a': 1, 'b': Quantity(10, unit='s')})
        t['c'] = Quantity([1, 2], unit='m')
        t['d'] = Time(['2001-01-02T12:34:56', '2001-02-03T00:01:02'])
        t['e'] = SkyCoord([125.0, 180.0] * deg, [-45.0, 36.5] * deg)

        ts = pickle.dumps(t, protocol=protocol)
        tp = pickle.loads(ts)

        assert tp.__class__ is table_class
        assert np.all(tp['a'] == t['a'])
        assert np.all(tp['b'] == t['b'])
        # test mixin columns
        assert np.all(tp['c'] == t['c'])
        assert np.all(tp['d'] == t['d'])
        assert np.all(tp['e'].ra == t['e'].ra)
        assert np.all(tp['e'].dec == t['e'].dec)
        assert type(tp['c']) is type(t['c'])  # nopep8
        assert type(tp['d']) is type(t['d'])  # nopep8
        assert type(tp['e']) is type(t['e'])  # nopep8
        assert tp.meta == t.meta
        assert type(tp) is type(t)
        assert isinstance(tp['c'], Quantity if (table_class is QTable) else Column)


def test_pickle_masked_table(protocol):
    a = Column(data=[1, 2], name='a', format='%05d', description='col a',
               unit='cm', meta={'a': 1})
    b = Column(data=[3.0, 4.0], name='b', format='%05d', description='col b',
               unit='cm', meta={'b': 1})
    t = Table([a, b], meta={'a': 1}, masked=True)
    t['a'].mask[1] = True
    t['a'].fill_value = -99

    ts = pickle.dumps(t, protocol=protocol)
    tp = pickle.loads(ts)

    for colname in ('a', 'b'):
        for attr in ('_data', 'mask', 'fill_value'):
            assert np.all(getattr(tp[colname], attr) == getattr(t[colname], attr))
    assert tp['a'].attrs_equal(t['a'])
    assert tp['b'].attrs_equal(t['b'])
    assert tp.meta == t.meta


def test_pickle_indexed_table(protocol):
    """
    Ensure that any indices that have been added will survive pickling.
    """
    t = simple_table()
    t.add_index('a')
    t.add_index(['a', 'b'])
    ts = pickle.dumps(t, protocol=protocol)
    tp = pickle.loads(ts)

    assert len(t.indices) == len(tp.indices)
    for index, indexp in zip(t.indices, tp.indices):
        assert np.all(index.data.data == indexp.data.data)
        assert index.data.data.colnames == indexp.data.data.colnames
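
# ---------------------------------------------------------------------------
# Illustrative sketch, not a collected test: the round-trip pattern used by
# every test above, shown for a single Column with the default protocol.
def _demo_pickle_roundtrip():
    c = Column(data=[1, 2], name='a')
    c2 = pickle.loads(pickle.dumps(c))  # equal data and attributes, new object
    return c2.attrs_equal(c)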
52fcdbf130817c84b9f39854d1e617f12cb9e5e23f6e8f2b9f87d6e8733496da
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
All of the py.test fixtures used by astropy.table are defined here.

The fixtures can not be defined in the modules that use them, because
those modules are imported twice: once with `from __future__ import
unicode_literals` and once without.  py.test complains when the same
fixtures are defined more than once.

`conftest.py` is a "special" module name for py.test that is always
imported, but is not itself searched for tests, and it is the recommended
place to put fixtures that are shared between modules.  These fixtures
can not be defined in a module by a different name and still be shared
between modules.
"""
from copy import deepcopy
from collections import OrderedDict
import pickle

import pytest
import numpy as np

from ... import table
from ...table import table_helpers, Table, QTable
from ... import time
from ... import units as u
from ... import coordinates
from .. import pprint


@pytest.fixture(params=[table.Column, table.MaskedColumn])
def Column(request):
    # Fixture to run all the Column tests for both an unmasked (ndarray)
    # and masked (MaskedArray) column.
    return request.param


class MaskedTable(table.Table):
    def __init__(self, *args, **kwargs):
        kwargs['masked'] = True
        table.Table.__init__(self, *args, **kwargs)


class MyRow(table.Row):
    pass


class MyColumn(table.Column):
    pass


class MyMaskedColumn(table.MaskedColumn):
    pass


class MyTableColumns(table.TableColumns):
    pass


class MyTableFormatter(pprint.TableFormatter):
    pass


class MyTable(table.Table):
    Row = MyRow
    Column = MyColumn
    MaskedColumn = MyMaskedColumn
    TableColumns = MyTableColumns
    TableFormatter = MyTableFormatter


# Fixture to run all the Column tests for both an unmasked (ndarray)
# and masked (MaskedArray) column.
@pytest.fixture(params=['unmasked', 'masked', 'subclass'])
def table_types(request):
    class TableTypes:
        def __init__(self, request):
            if request.param == 'unmasked':
                self.Table = table.Table
                self.Column = table.Column
            elif request.param == 'masked':
                self.Table = MaskedTable
                self.Column = table.MaskedColumn
            elif request.param == 'subclass':
                self.Table = MyTable
                self.Column = MyColumn
    return TableTypes(request)


# Fixture to run all the Column tests for both an unmasked (ndarray)
# and masked (MaskedArray) column.
@pytest.fixture(params=[False, True])
def table_data(request):
    class TableData:
        def __init__(self, request):
            self.Table = MaskedTable if request.param else table.Table
            self.Column = table.MaskedColumn if request.param else table.Column
            self.COLS = [
                self.Column(name='a', data=[1, 2, 3], description='da',
                            format='%i', meta={'ma': 1}, unit='ua'),
                self.Column(name='b', data=[4, 5, 6], description='db',
                            format='%d', meta={'mb': 1}, unit='ub'),
                self.Column(name='c', data=[7, 8, 9], description='dc',
                            format='%f', meta={'mc': 1}, unit='ub')]
            self.DATA = self.Table(self.COLS)
    return TableData(request)


class SubclassTable(table.Table):
    pass


@pytest.fixture(params=[True, False])
def tableclass(request):
    return table.Table if request.param else SubclassTable


@pytest.fixture(params=list(range(0, pickle.HIGHEST_PROTOCOL + 1)))
def protocol(request):
    """
    Fixture to run all the tests for all available pickle protocols.
    """
    return request.param


# Fixture to run all tests for both an unmasked (ndarray) and masked
# (MaskedArray) column.
@pytest.fixture(params=[False, True])
def table_type(request):
    try:
        return MaskedTable if request.param else table.Table
    except AttributeError:
        return table.Table


# Stuff for testing mixin columns

MIXIN_COLS = {'quantity': [0, 1, 2, 3] * u.m,
              'longitude': coordinates.Longitude([0., 1., 5., 6.] * u.deg,
                                                 wrap_angle=180. * u.deg),
              'latitude': coordinates.Latitude([5., 6., 10., 11.] * u.deg),
              'time': time.Time([2000, 2001, 2002, 2003], format='jyear'),
              'skycoord': coordinates.SkyCoord(ra=[0, 1, 2, 3] * u.deg,
                                               dec=[0, 1, 2, 3] * u.deg),
              'arraywrap': table_helpers.ArrayWrapper([0, 1, 2, 3]),
              'ndarray': np.array([(7, 'a'), (8, 'b'), (9, 'c'), (9, 'c')],
                                  dtype='<i4,|S1').view(table.NdarrayMixin),
              }
MIXIN_COLS['earthlocation'] = coordinates.EarthLocation(
    lon=MIXIN_COLS['longitude'], lat=MIXIN_COLS['latitude'],
    height=MIXIN_COLS['quantity'])


@pytest.fixture(params=sorted(MIXIN_COLS))
def mixin_cols(request):
    """
    Fixture to return a set of columns for mixin testing which includes
    an index column 'i', two string cols 'a', 'b' (for joins etc), and
    one of the available mixin column types.
    """
    cols = OrderedDict()
    mixin_cols = deepcopy(MIXIN_COLS)
    cols['i'] = table.Column([0, 1, 2, 3], name='i')
    cols['a'] = table.Column(['a', 'b', 'b', 'c'], name='a')
    cols['b'] = table.Column(['b', 'c', 'a', 'd'], name='b')
    cols['m'] = mixin_cols[request.param]

    return cols


@pytest.fixture(params=[False, True])
def T1(request):
    T = Table.read([' a b c d',
                    ' 2 c 7.0 0',
                    ' 2 b 5.0 1',
                    ' 2 b 6.0 2',
                    ' 2 a 4.0 3',
                    ' 0 a 0.0 4',
                    ' 1 b 3.0 5',
                    ' 1 a 2.0 6',
                    ' 1 a 1.0 7',
                    ], format='ascii')
    T.meta.update({'ta': 1})
    T['c'].meta.update({'a': 1})
    T['c'].description = 'column c'
    if request.param:
        T.add_index('a')
    return T


@pytest.fixture(params=[Table, QTable])
def operation_table_type(request):
    return request.param
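
# ---------------------------------------------------------------------------
# Illustrative sketch, not a fixture: a test module consumes the fixtures
# defined above simply by naming them as arguments; pytest then injects each
# parametrization in turn, e.g.
#
#     def test_something(T1, operation_table_type):
#         t = operation_table_type(T1)
#         ...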
34e4e27db0d54fca1fb0929ef311724dae8def3179999e88c90b103c9c37030e
# Licensed under a 3-clause BSD style license - see LICENSE.rst from collections import OrderedDict import pytest import numpy as np from ...tests.helper import catch_warnings from ...table import Table, QTable, TableMergeError from ...table.operations import _get_out_class from ... import units as u from ...utils import metadata from ...utils.metadata import MergeConflictError from ... import table def sort_eq(list1, list2): return sorted(list1) == sorted(list2) class TestJoin(): def _setup(self, t_cls=Table): lines1 = [' a b c ', ' 0 foo L1', ' 1 foo L2', ' 1 bar L3', ' 2 bar L4'] lines2 = [' a b d ', ' 1 foo R1', ' 1 foo R2', ' 2 bar R3', ' 4 bar R4'] self.t1 = t_cls.read(lines1, format='ascii') self.t2 = t_cls.read(lines2, format='ascii') self.t3 = t_cls(self.t2, copy=True) self.t1.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)])) self.t2.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])) self.t3.meta.update(OrderedDict([('b', 3), ('c', [1, 2]), ('d', 2), ('a', 1)])) self.meta_merge = OrderedDict([('b', [1, 2, 3, 4]), ('c', {'a': 1, 'b': 1}), ('d', 1), ('a', 1)]) def test_table_meta_merge(self, operation_table_type): self._setup(operation_table_type) out = table.join(self.t1, self.t2, join_type='inner') assert out.meta == self.meta_merge def test_table_meta_merge_conflict(self, operation_table_type): self._setup(operation_table_type) with catch_warnings() as w: out = table.join(self.t1, self.t3, join_type='inner') assert len(w) == 3 assert out.meta == self.t3.meta with catch_warnings() as w: out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='warn') assert len(w) == 3 assert out.meta == self.t3.meta with catch_warnings() as w: out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='silent') assert len(w) == 0 assert out.meta == self.t3.meta with pytest.raises(MergeConflictError): out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='error') with pytest.raises(ValueError): out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='nonsense') def test_both_unmasked_inner(self, operation_table_type): self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 # Basic join with default parameters (inner join on common keys) t12 = table.join(t1, t2) assert type(t12) is operation_table_type assert type(t12['a']) is type(t1['a']) assert type(t12['b']) is type(t1['b']) assert type(t12['c']) is type(t1['c']) assert type(t12['d']) is type(t2['d']) assert t12.masked is False assert sort_eq(t12.pformat(), [' a b c d ', '--- --- --- ---', ' 1 foo L2 R1', ' 1 foo L2 R2', ' 2 bar L4 R3']) # Table meta merged properly assert t12.meta == self.meta_merge def test_both_unmasked_left_right_outer(self, operation_table_type): if operation_table_type is QTable: pytest.xfail('Quantity columns do not support masking.') self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 # Left join t12 = table.join(t1, t2, join_type='left') assert t12.masked is True assert sort_eq(t12.pformat(), [' a b c d ', '--- --- --- ---', ' 0 foo L1 --', ' 1 bar L3 --', ' 1 foo L2 R1', ' 1 foo L2 R2', ' 2 bar L4 R3']) # Right join t12 = table.join(t1, t2, join_type='right') assert t12.masked is True assert sort_eq(t12.pformat(), [' a b c d ', '--- --- --- ---', ' 1 foo L2 R1', ' 1 foo L2 R2', ' 2 bar L4 R3', ' 4 bar -- R4']) # Outer join t12 = table.join(t1, t2, join_type='outer') assert t12.masked is True assert sort_eq(t12.pformat(), [' a b c d ', '--- --- --- ---', ' 0 foo L1 --', ' 1 bar L3 --', ' 1 foo L2 R1', ' 1 
foo L2 R2', ' 2 bar L4 R3', ' 4 bar -- R4']) # Check that the common keys are 'a', 'b' t12a = table.join(t1, t2, join_type='outer') t12b = table.join(t1, t2, join_type='outer', keys=['a', 'b']) assert np.all(t12a.as_array() == t12b.as_array()) def test_both_unmasked_single_key_inner(self, operation_table_type): self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 # Inner join on 'a' column t12 = table.join(t1, t2, keys='a') assert type(t12) is operation_table_type assert type(t12['a']) is type(t1['a']) assert type(t12['b_1']) is type(t1['b']) assert type(t12['c']) is type(t1['c']) assert type(t12['b_2']) is type(t2['b']) assert type(t12['d']) is type(t2['d']) assert t12.masked is False assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ', '--- --- --- --- ---', ' 1 foo L2 foo R1', ' 1 foo L2 foo R2', ' 1 bar L3 foo R1', ' 1 bar L3 foo R2', ' 2 bar L4 bar R3']) def test_both_unmasked_single_key_left_right_outer(self, operation_table_type): if operation_table_type is QTable: pytest.xfail('Quantity columns do not support masking.') self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 # Left join t12 = table.join(t1, t2, join_type='left', keys='a') assert t12.masked is True assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ', '--- --- --- --- ---', ' 0 foo L1 -- --', ' 1 foo L2 foo R1', ' 1 foo L2 foo R2', ' 1 bar L3 foo R1', ' 1 bar L3 foo R2', ' 2 bar L4 bar R3']) # Right join t12 = table.join(t1, t2, join_type='right', keys='a') assert t12.masked is True assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ', '--- --- --- --- ---', ' 1 foo L2 foo R1', ' 1 foo L2 foo R2', ' 1 bar L3 foo R1', ' 1 bar L3 foo R2', ' 2 bar L4 bar R3', ' 4 -- -- bar R4']) # Outer join t12 = table.join(t1, t2, join_type='outer', keys='a') assert t12.masked is True assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ', '--- --- --- --- ---', ' 0 foo L1 -- --', ' 1 foo L2 foo R1', ' 1 foo L2 foo R2', ' 1 bar L3 foo R1', ' 1 bar L3 foo R2', ' 2 bar L4 bar R3', ' 4 -- -- bar R4']) def test_masked_unmasked(self, operation_table_type): if operation_table_type is QTable: pytest.xfail('Quantity columns do not support masking.') self._setup(operation_table_type) t1 = self.t1 t1m = operation_table_type(self.t1, masked=True) t2 = self.t2 # Result should be masked even though not req'd by inner join t1m2 = table.join(t1m, t2, join_type='inner') assert t1m2.masked is True # Result should match non-masked result t12 = table.join(t1, t2) assert np.all(t12.as_array() == np.array(t1m2)) # Mask out some values in left table and make sure they propagate t1m['b'].mask[1] = True t1m['c'].mask[2] = True t1m2 = table.join(t1m, t2, join_type='inner', keys='a') assert sort_eq(t1m2.pformat(), [' a b_1 c b_2 d ', '--- --- --- --- ---', ' 1 -- L2 foo R1', ' 1 -- L2 foo R2', ' 1 bar -- foo R1', ' 1 bar -- foo R2', ' 2 bar L4 bar R3']) t21m = table.join(t2, t1m, join_type='inner', keys='a') assert sort_eq(t21m.pformat(), [' a b_1 d b_2 c ', '--- --- --- --- ---', ' 1 foo R2 -- L2', ' 1 foo R2 bar --', ' 1 foo R1 -- L2', ' 1 foo R1 bar --', ' 2 bar R3 bar L4']) def test_masked_masked(self, operation_table_type): self._setup(operation_table_type) """Two masked tables""" if operation_table_type is QTable: pytest.xfail('Quantity columns do not support masking.') t1 = self.t1 t1m = operation_table_type(self.t1, masked=True) t2 = self.t2 t2m = operation_table_type(self.t2, masked=True) # Result should be masked even though not req'd by inner join t1m2m = table.join(t1m, t2m, join_type='inner') assert t1m2m.masked is True # Result should match non-masked 
result t12 = table.join(t1, t2) assert np.all(t12.as_array() == np.array(t1m2m)) # Mask out some values in both tables and make sure they propagate t1m['b'].mask[1] = True t1m['c'].mask[2] = True t2m['d'].mask[2] = True t1m2m = table.join(t1m, t2m, join_type='inner', keys='a') assert sort_eq(t1m2m.pformat(), [' a b_1 c b_2 d ', '--- --- --- --- ---', ' 1 -- L2 foo R1', ' 1 -- L2 foo R2', ' 1 bar -- foo R1', ' 1 bar -- foo R2', ' 2 bar L4 bar --']) def test_col_rename(self, operation_table_type): self._setup(operation_table_type) """ Test auto col renaming when there is a conflict. Use non-default values of uniq_col_name and table_names. """ t1 = self.t1 t2 = self.t2 t12 = table.join(t1, t2, uniq_col_name='x_{table_name}_{col_name}_y', table_names=['L', 'R'], keys='a') assert t12.colnames == ['a', 'x_L_b_y', 'c', 'x_R_b_y', 'd'] def test_rename_conflict(self, operation_table_type): self._setup(operation_table_type) """ Test that auto-column rename fails because of a conflict with an existing column """ t1 = self.t1 t2 = self.t2 t1['b_1'] = 1 # Add a new column b_1 that will conflict with auto-rename with pytest.raises(TableMergeError): table.join(t1, t2, keys='a') def test_missing_keys(self, operation_table_type): self._setup(operation_table_type) """Merge on a key column that doesn't exist""" t1 = self.t1 t2 = self.t2 with pytest.raises(TableMergeError): table.join(t1, t2, keys=['a', 'not there']) def test_bad_join_type(self, operation_table_type): self._setup(operation_table_type) """Bad join_type input""" t1 = self.t1 t2 = self.t2 with pytest.raises(ValueError): table.join(t1, t2, join_type='illegal value') def test_no_common_keys(self, operation_table_type): self._setup(operation_table_type) """Merge tables with no common keys""" t1 = self.t1 t2 = self.t2 del t1['a'] del t1['b'] del t2['a'] del t2['b'] with pytest.raises(TableMergeError): table.join(t1, t2) def test_masked_key_column(self, operation_table_type): self._setup(operation_table_type) """Merge on a key column that has a masked element""" if operation_table_type is QTable: pytest.xfail('Quantity columns do not support masking.') t1 = self.t1 t2 = operation_table_type(self.t2, masked=True) table.join(t1, t2) # OK t2['a'].mask[0] = True with pytest.raises(TableMergeError): table.join(t1, t2) def test_col_meta_merge(self, operation_table_type): self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 t2.rename_column('d', 'c') # force col conflict and renaming meta1 = OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]) meta2 = OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]) # Key col 'a', should first value ('cm') t1['a'].unit = 'cm' t2['a'].unit = 'm' # Key col 'b', take first value 't1_b' t1['b'].info.description = 't1_b' # Key col 'b', take first non-empty value 't1_b' t2['b'].info.format = '%6s' # Key col 'a', should be merged meta t1['a'].info.meta = meta1 t2['a'].info.meta = meta2 # Key col 'b', should be meta2 t2['b'].info.meta = meta2 # All these should pass through t1['c'].info.format = '%3s' t1['c'].info.description = 't1_c' t2['c'].info.format = '%6s' t2['c'].info.description = 't2_c' with catch_warnings(metadata.MergeConflictWarning) as warning_lines: t12 = table.join(t1, t2, keys=['a', 'b']) if operation_table_type is Table: assert warning_lines[0].category == metadata.MergeConflictWarning assert ("In merged column 'a' the 'unit' attribute does not match (cm != m)" in str(warning_lines[0].message)) else: assert len(warning_lines) == 0 assert t12['a'].unit == 'm' assert t12['b'].info.description == 
't1_b' assert t12['b'].info.format == '%6s' assert t12['a'].info.meta == self.meta_merge assert t12['b'].info.meta == meta2 assert t12['c_1'].info.format == '%3s' assert t12['c_1'].info.description == 't1_c' assert t12['c_2'].info.format == '%6s' assert t12['c_2'].info.description == 't2_c' def test_join_multidimensional(self, operation_table_type): self._setup(operation_table_type) # Regression test for #2984, which was an issue where join did not work # on multi-dimensional columns. t1 = operation_table_type() t1['a'] = [1, 2, 3] t1['b'] = np.ones((3, 4)) t2 = operation_table_type() t2['a'] = [1, 2, 3] t2['c'] = [4, 5, 6] t3 = table.join(t1, t2) np.testing.assert_allclose(t3['a'], t1['a']) np.testing.assert_allclose(t3['b'], t1['b']) np.testing.assert_allclose(t3['c'], t2['c']) def test_join_multidimensional_masked(self, operation_table_type): self._setup(operation_table_type) """ Test for outer join with multidimensional columns where masking is required. (Issue #4059). """ if operation_table_type is QTable: pytest.xfail('Quantity columns do not support masking.') a = table.MaskedColumn([1, 2, 3], name='a') a2 = table.Column([1, 3, 4], name='a') b = table.MaskedColumn([[1, 2], [3, 4], [5, 6]], name='b', mask=[[1, 0], [0, 1], [0, 0]]) c = table.Column([[1, 1], [2, 2], [3, 3]], name='c') t1 = operation_table_type([a, b]) t2 = operation_table_type([a2, c]) t12 = table.join(t1, t2, join_type='inner') assert np.all(t12['b'].mask == [[True, False], [False, False]]) assert np.all(t12['c'].mask == [[False, False], [False, False]]) t12 = table.join(t1, t2, join_type='outer') assert np.all(t12['b'].mask == [[True, False], [False, True], [False, False], [True, True]]) assert np.all(t12['c'].mask == [[False, False], [True, True], [False, False], [False, False]]) class TestSetdiff(): def _setup(self, t_cls=Table): lines1 = [' a b ', ' 0 foo ', ' 1 foo ', ' 1 bar ', ' 2 bar '] lines2 = [' a b ', ' 0 foo ', ' 3 foo ', ' 4 bar ', ' 2 bar '] lines3 = [' a b d ', ' 0 foo R1', ' 8 foo R2', ' 1 bar R3', ' 4 bar R4'] self.t1 = t_cls.read(lines1, format='ascii') self.t2 = t_cls.read(lines2, format='ascii') self.t3 = t_cls.read(lines3, format='ascii') def test_default_same_columns(self, operation_table_type): self._setup(operation_table_type) out = table.setdiff(self.t1, self.t2) assert type(out['a']) is type(self.t1['a']) assert type(out['b']) is type(self.t1['b']) assert out.pformat() == [' a b ', '--- ---', ' 1 bar', ' 1 foo'] def test_default_same_tables(self, operation_table_type): self._setup(operation_table_type) out = table.setdiff(self.t1, self.t1) assert type(out['a']) is type(self.t1['a']) assert type(out['b']) is type(self.t1['b']) assert out.pformat() == [' a b ', '--- ---'] def test_extra_col_left_table(self, operation_table_type): self._setup(operation_table_type) with pytest.raises(ValueError): out = table.setdiff(self.t3, self.t1) def test_extra_col_right_table(self, operation_table_type): self._setup(operation_table_type) out = table.setdiff(self.t1, self.t3) assert type(out['a']) is type(self.t1['a']) assert type(out['b']) is type(self.t1['b']) assert out.pformat() == [' a b ', '--- ---', ' 1 foo', ' 2 bar'] def test_keys(self, operation_table_type): self._setup(operation_table_type) out = table.setdiff(self.t3, self.t1, keys=['a', 'b']) assert type(out['a']) is type(self.t1['a']) assert type(out['b']) is type(self.t1['b']) assert out.pformat() == [' a b d ', '--- --- ---', ' 4 bar R4', ' 8 foo R2'] def test_missing_key(self, operation_table_type): self._setup(operation_table_type) with 
pytest.raises(ValueError): out = table.setdiff(self.t3, self.t1, keys=['a', 'd']) class TestVStack(): def _setup(self, t_cls=Table): self.t1 = t_cls.read([' a b', ' 0. foo', ' 1. bar'], format='ascii') self.t2 = t_cls.read([' a b c', ' 2. pez 4', ' 3. sez 5'], format='ascii') self.t3 = t_cls.read([' a b', ' 4. 7', ' 5. 8', ' 6. 9'], format='ascii') self.t4 = t_cls(self.t1, copy=True, masked=t_cls is Table) # The following table has meta-data that conflicts with t1 self.t5 = t_cls(self.t1, copy=True) self.t1.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)])) self.t2.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])) self.t4.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)])) self.t5.meta.update(OrderedDict([('b', 3), ('c', 'k'), ('d', 1)])) self.meta_merge = OrderedDict([('b', [1, 2, 3, 4, 5, 6]), ('c', {'a': 1, 'b': 1, 'c': 1}), ('d', 1), ('a', 1), ('e', 1)]) def test_stack_rows(self, operation_table_type): self._setup(operation_table_type) t2 = self.t1.copy() t2.meta.clear() out = table.vstack([self.t1, t2[1]]) assert type(out['a']) is type(self.t1['a']) assert type(out['b']) is type(self.t1['b']) assert out.pformat() == [' a b ', '--- ---', '0.0 foo', '1.0 bar', '1.0 bar'] def test_table_meta_merge(self, operation_table_type): self._setup(operation_table_type) out = table.vstack([self.t1, self.t2, self.t4], join_type='inner') assert out.meta == self.meta_merge def test_table_meta_merge_conflict(self, operation_table_type): self._setup(operation_table_type) with catch_warnings() as w: out = table.vstack([self.t1, self.t5], join_type='inner') assert len(w) == 2 assert out.meta == self.t5.meta with catch_warnings() as w: out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='warn') assert len(w) == 2 assert out.meta == self.t5.meta with catch_warnings() as w: out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='silent') assert len(w) == 0 assert out.meta == self.t5.meta with pytest.raises(MergeConflictError): out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='error') with pytest.raises(ValueError): out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='nonsense') def test_bad_input_type(self, operation_table_type): self._setup(operation_table_type) with pytest.raises(TypeError): table.vstack([]) with pytest.raises(TypeError): table.vstack(1) with pytest.raises(TypeError): table.vstack([self.t2, 1]) with pytest.raises(ValueError): table.vstack([self.t1, self.t2], join_type='invalid join type') def test_stack_basic_inner(self, operation_table_type): self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 t4 = self.t4 t12 = table.vstack([t1, t2], join_type='inner') assert t12.masked is False assert type(t12) is operation_table_type assert type(t12['a']) is type(t1['a']) assert type(t12['b']) is type(t1['b']) assert t12.pformat() == [' a b ', '--- ---', '0.0 foo', '1.0 bar', '2.0 pez', '3.0 sez'] t124 = table.vstack([t1, t2, t4], join_type='inner') assert type(t124) is operation_table_type assert type(t12['a']) is type(t1['a']) assert type(t12['b']) is type(t1['b']) assert t124.pformat() == [' a b ', '--- ---', '0.0 foo', '1.0 bar', '2.0 pez', '3.0 sez', '0.0 foo', '1.0 bar'] def test_stack_basic_outer(self, operation_table_type): if operation_table_type is QTable: pytest.xfail('Quantity columns do not support masking.') self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 t4 = self.t4 t12 = table.vstack([t1, t2], join_type='outer') 
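        # With join_type='outer' the 'c' column (absent from t1) is masked for t1's rows ('--')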
assert t12.pformat() == [' a b c ', '--- --- ---', '0.0 foo --', '1.0 bar --', '2.0 pez 4', '3.0 sez 5'] t124 = table.vstack([t1, t2, t4], join_type='outer') assert t124.pformat() == [' a b c ', '--- --- ---', '0.0 foo --', '1.0 bar --', '2.0 pez 4', '3.0 sez 5', '0.0 foo --', '1.0 bar --'] def test_stack_incompatible(self, operation_table_type): self._setup(operation_table_type) with pytest.raises(TableMergeError) as excinfo: table.vstack([self.t1, self.t3], join_type='inner') assert ("The 'b' columns have incompatible types: {0}" .format([self.t1['b'].dtype.name, self.t3['b'].dtype.name]) in str(excinfo)) with pytest.raises(TableMergeError) as excinfo: table.vstack([self.t1, self.t3], join_type='outer') assert "The 'b' columns have incompatible types:" in str(excinfo) with pytest.raises(TableMergeError): table.vstack([self.t1, self.t2], join_type='exact') t1_reshape = self.t1.copy() t1_reshape['b'].shape = [2, 1] with pytest.raises(TableMergeError) as excinfo: table.vstack([self.t1, t1_reshape]) assert "have different shape" in str(excinfo) def test_vstack_one_masked(self, operation_table_type): if operation_table_type is QTable: pytest.xfail('Quantity columns do not support masking.') self._setup(operation_table_type) t1 = self.t1 t4 = self.t4 t4['b'].mask[1] = True assert table.vstack([t1, t4]).pformat() == [' a b ', '--- ---', '0.0 foo', '1.0 bar', '0.0 foo', '1.0 --'] def test_col_meta_merge_inner(self, operation_table_type): self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 t4 = self.t4 # Key col 'a', should last value ('km') t1['a'].info.unit = 'cm' t2['a'].info.unit = 'm' t4['a'].info.unit = 'km' # Key col 'a' format should take last when all match t1['a'].info.format = '%f' t2['a'].info.format = '%f' t4['a'].info.format = '%f' # Key col 'b', take first value 't1_b' t1['b'].info.description = 't1_b' # Key col 'b', take first non-empty value '%6s' t4['b'].info.format = '%6s' # Key col 'a', should be merged meta t1['a'].info.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)])) t2['a'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])) t4['a'].info.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)])) # Key col 'b', should be meta2 t2['b'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])) with catch_warnings(metadata.MergeConflictWarning) as warning_lines: out = table.vstack([t1, t2, t4], join_type='inner') if operation_table_type is Table: assert warning_lines[0].category == metadata.MergeConflictWarning assert ("In merged column 'a' the 'unit' attribute does not match (cm != m)" in str(warning_lines[0].message)) assert warning_lines[1].category == metadata.MergeConflictWarning assert ("In merged column 'a' the 'unit' attribute does not match (m != km)" in str(warning_lines[1].message)) # Check units are suitably ignored for a regular Table assert out.pformat() == [' a b ', ' km ', '-------- ------', '0.000000 foo', '1.000000 bar', '2.000000 pez', '3.000000 sez', '0.000000 foo', '1.000000 bar'] else: assert len(warning_lines) == 0 # Check QTable correctly dealt with units. 
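            # (0, 1 cm -> 0.000000, 0.000010 km; 2, 3 m -> 0.002000, 0.003000 km)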
assert out.pformat() == [' a b ', ' km ', '-------- ------', '0.000000 foo', '0.000010 bar', '0.002000 pez', '0.003000 sez', '0.000000 foo', '1.000000 bar'] assert out['a'].info.unit == 'km' assert out['a'].info.format == '%f' assert out['b'].info.description == 't1_b' assert out['b'].info.format == '%6s' assert out['a'].info.meta == self.meta_merge assert out['b'].info.meta == OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]) def test_col_meta_merge_outer(self, operation_table_type): if operation_table_type is QTable: pytest.xfail('Quantity columns do not support masking.') self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 t4 = self.t4 # Key col 'a', should last value ('km') t1['a'].unit = 'cm' t2['a'].unit = 'm' t4['a'].unit = 'km' # Key col 'a' format should take last when all match t1['a'].info.format = '%0d' t2['a'].info.format = '%0d' t4['a'].info.format = '%0d' # Key col 'b', take first value 't1_b' t1['b'].info.description = 't1_b' # Key col 'b', take first non-empty value '%6s' t4['b'].info.format = '%6s' # Key col 'a', should be merged meta t1['a'].info.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)])) t2['a'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])) t4['a'].info.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)])) # Key col 'b', should be meta2 t2['b'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])) # All these should pass through t2['c'].unit = 'm' t2['c'].info.format = '%6s' t2['c'].info.description = 't2_c' with catch_warnings(metadata.MergeConflictWarning) as warning_lines: out = table.vstack([t1, t2, t4], join_type='outer') assert warning_lines[0].category == metadata.MergeConflictWarning assert ("In merged column 'a' the 'unit' attribute does not match (cm != m)" in str(warning_lines[0].message)) assert warning_lines[1].category == metadata.MergeConflictWarning assert ("In merged column 'a' the 'unit' attribute does not match (m != km)" in str(warning_lines[1].message)) assert out['a'].unit == 'km' assert out['a'].info.format == '%0d' assert out['b'].info.description == 't1_b' assert out['b'].info.format == '%6s' assert out['a'].info.meta == self.meta_merge assert out['b'].info.meta == OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]) assert out['c'].info.unit == 'm' assert out['c'].info.format == '%6s' assert out['c'].info.description == 't2_c' def test_vstack_one_table(self, operation_table_type): self._setup(operation_table_type) """Regression test for issue #3313""" assert (self.t1 == table.vstack(self.t1)).all() assert (self.t1 == table.vstack([self.t1])).all() def test_check_for_mixin_functionality(self, mixin_cols): col = mixin_cols['m'] t = table.QTable([col]) cls_name = type(col).__name__ # Vstack works for these classes: implemented_mixin_classes = ['Quantity', 'Angle', 'Latitude', 'Longitude', 'EarthLocation'] if cls_name in implemented_mixin_classes: table.vstack([t, t]) else: with pytest.raises(NotImplementedError) as err: table.vstack([t, t]) assert ('vstack unavailable for mixin column type(s): {}' .format(cls_name) in str(err)) class TestHStack(): def _setup(self, t_cls=Table): self.t1 = t_cls.read([' a b', ' 0. foo', ' 1. bar'], format='ascii') self.t2 = t_cls.read([' a b c', ' 2. pez 4', ' 3. sez 5'], format='ascii') self.t3 = t_cls.read([' d e', ' 4. 7', ' 5. 8', ' 6. 
9'], format='ascii') self.t4 = t_cls(self.t1, copy=True, masked=True) self.t4['a'].name = 'f' self.t4['b'].name = 'g' # The following table has meta-data that conflicts with t1 self.t5 = t_cls(self.t1, copy=True) self.t1.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)])) self.t2.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])) self.t4.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)])) self.t5.meta.update(OrderedDict([('b', 3), ('c', 'k'), ('d', 1)])) self.meta_merge = OrderedDict([('b', [1, 2, 3, 4, 5, 6]), ('c', {'a': 1, 'b': 1, 'c': 1}), ('d', 1), ('a', 1), ('e', 1)]) def test_stack_same_table(self, operation_table_type): """ From #2995, test that hstack'ing references to the same table has the expected output. """ self._setup(operation_table_type) out = table.hstack([self.t1, self.t1]) assert out.pformat() == ['a_1 b_1 a_2 b_2', '--- --- --- ---', '0.0 foo 0.0 foo', '1.0 bar 1.0 bar'] def test_stack_rows(self, operation_table_type): self._setup(operation_table_type) out = table.hstack([self.t1[0], self.t2[1]]) assert out.pformat() == ['a_1 b_1 a_2 b_2 c ', '--- --- --- --- ---', '0.0 foo 3.0 sez 5'] def test_table_meta_merge(self, operation_table_type): self._setup(operation_table_type) out = table.hstack([self.t1, self.t2, self.t4], join_type='inner') assert out.meta == self.meta_merge def test_table_meta_merge_conflict(self, operation_table_type): self._setup(operation_table_type) with catch_warnings() as w: out = table.hstack([self.t1, self.t5], join_type='inner') assert len(w) == 2 assert out.meta == self.t5.meta with catch_warnings() as w: out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='warn') assert len(w) == 2 assert out.meta == self.t5.meta with catch_warnings() as w: out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='silent') assert len(w) == 0 assert out.meta == self.t5.meta with pytest.raises(MergeConflictError): out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='error') with pytest.raises(ValueError): out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='nonsense') def test_bad_input_type(self, operation_table_type): self._setup(operation_table_type) with pytest.raises(TypeError): table.hstack([]) with pytest.raises(TypeError): table.hstack(1) with pytest.raises(TypeError): table.hstack([self.t2, 1]) with pytest.raises(ValueError): table.hstack([self.t1, self.t2], join_type='invalid join type') def test_stack_basic(self, operation_table_type): self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 t3 = self.t3 t4 = self.t4 out = table.hstack([t1, t2], join_type='inner') assert out.masked is False assert type(out) is operation_table_type assert type(out['a_1']) is type(t1['a']) assert type(out['b_1']) is type(t1['b']) assert type(out['a_2']) is type(t2['a']) assert type(out['b_2']) is type(t2['b']) assert out.pformat() == ['a_1 b_1 a_2 b_2 c ', '--- --- --- --- ---', '0.0 foo 2.0 pez 4', '1.0 bar 3.0 sez 5'] # stacking as a list gives same result out_list = table.hstack([t1, t2], join_type='inner') assert out.pformat() == out_list.pformat() out = table.hstack([t1, t2], join_type='outer') assert out.pformat() == out_list.pformat() out = table.hstack([t1, t2, t3, t4], join_type='outer') assert out.pformat() == ['a_1 b_1 a_2 b_2 c d e f g ', '--- --- --- --- --- --- --- --- ---', '0.0 foo 2.0 pez 4 4.0 7 0.0 foo', '1.0 bar 3.0 sez 5 5.0 8 1.0 bar', ' -- -- -- -- -- 6.0 9 -- --'] out = table.hstack([t1, t2, t3, 
t4], join_type='inner') assert out.pformat() == ['a_1 b_1 a_2 b_2 c d e f g ', '--- --- --- --- --- --- --- --- ---', '0.0 foo 2.0 pez 4 4.0 7 0.0 foo', '1.0 bar 3.0 sez 5 5.0 8 1.0 bar'] def test_stack_incompatible(self, operation_table_type): self._setup(operation_table_type) # For join_type exact, which will fail here because n_rows # does not match with pytest.raises(TableMergeError): table.hstack([self.t1, self.t3], join_type='exact') def test_hstack_one_masked(self, operation_table_type): if operation_table_type is QTable: pytest.xfail() self._setup(operation_table_type) t1 = self.t1 t2 = operation_table_type(t1, copy=True, masked=True) t2.meta.clear() t2['b'].mask[1] = True assert table.hstack([t1, t2]).pformat() == ['a_1 b_1 a_2 b_2', '--- --- --- ---', '0.0 foo 0.0 foo', '1.0 bar 1.0 --'] def test_table_col_rename(self, operation_table_type): self._setup(operation_table_type) out = table.hstack([self.t1, self.t2], join_type='inner', uniq_col_name='{table_name}_{col_name}', table_names=('left', 'right')) assert out.masked is False assert out.pformat() == ['left_a left_b right_a right_b c ', '------ ------ ------- ------- ---', ' 0.0 foo 2.0 pez 4', ' 1.0 bar 3.0 sez 5'] def test_col_meta_merge(self, operation_table_type): self._setup(operation_table_type) t1 = self.t1 t3 = self.t3[:2] t4 = self.t4 # Just set a bunch of meta and make sure it is the same in output meta1 = OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]) t1['a'].unit = 'cm' t1['b'].info.description = 't1_b' t4['f'].info.format = '%6s' t1['b'].info.meta.update(meta1) t3['d'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])) t4['g'].info.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)])) t3['e'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])) t3['d'].unit = 'm' t3['d'].info.format = '%6s' t3['d'].info.description = 't3_c' with catch_warnings(metadata.MergeConflictWarning) as warning_lines: out = table.hstack([t1, t3, t4], join_type='exact') assert len(warning_lines) == 0 for t in [t1, t3, t4]: for name in t.colnames: for attr in ('meta', 'unit', 'format', 'description'): assert getattr(out[name].info, attr) == getattr(t[name].info, attr) # Make sure we got a copy of meta, not ref t1['b'].info.meta['b'] = None assert out['b'].info.meta['b'] == [1, 2] def test_hstack_one_table(self, operation_table_type): self._setup(operation_table_type) """Regression test for issue #3313""" assert (self.t1 == table.hstack(self.t1)).all() assert (self.t1 == table.hstack([self.t1])).all() def test_unique(operation_table_type): t = operation_table_type.read( [' a b c d', ' 2 b 7.0 0', ' 1 c 3.0 5', ' 2 b 6.0 2', ' 2 a 4.0 3', ' 1 a 1.0 7', ' 2 b 5.0 1', ' 0 a 0.0 4', ' 1 a 2.0 6', ' 1 c 3.0 5', ], format='ascii') tu = operation_table_type(np.sort(t[:-1])) t_all = table.unique(t) assert sort_eq(t_all.pformat(), tu.pformat()) t_s = t.copy() del t_s['b', 'c', 'd'] t_all = table.unique(t_s) assert sort_eq(t_all.pformat(), [' a ', '---', ' 0', ' 1', ' 2']) key1 = 'a' t1a = table.unique(t, key1) assert sort_eq(t1a.pformat(), [' a b c d ', '--- --- --- ---', ' 0 a 0.0 4', ' 1 c 3.0 5', ' 2 b 7.0 0']) t1b = table.unique(t, key1, keep='last') assert sort_eq(t1b.pformat(), [' a b c d ', '--- --- --- ---', ' 0 a 0.0 4', ' 1 c 3.0 5', ' 2 b 5.0 1']) t1c = table.unique(t, key1, keep='none') assert sort_eq(t1c.pformat(), [' a b c d ', '--- --- --- ---', ' 0 a 0.0 4']) key2 = ['a', 'b'] t2a = table.unique(t, key2) assert sort_eq(t2a.pformat(), [' a b c d ', '--- --- --- ---', ' 0 
a 0.0 4',
                             ' 1 a 1.0 7',
                             ' 1 c 3.0 5',
                             ' 2 a 4.0 3',
                             ' 2 b 7.0 0'])

    t2b = table.unique(t, key2, keep='last')
    assert sort_eq(t2b.pformat(), [' a b c d ',
                                   '--- --- --- ---',
                                   ' 0 a 0.0 4',
                                   ' 1 a 2.0 6',
                                   ' 1 c 3.0 5',
                                   ' 2 a 4.0 3',
                                   ' 2 b 5.0 1'])

    t2c = table.unique(t, key2, keep='none')
    assert sort_eq(t2c.pformat(), [' a b c d ',
                                   '--- --- --- ---',
                                   ' 0 a 0.0 4',
                                   ' 2 a 4.0 3'])

    key2 = ['a', 'a']
    with pytest.raises(ValueError) as exc:
        t2a = table.unique(t, key2)
    assert exc.value.args[0] == "duplicate key names"

    with pytest.raises(ValueError) as exc:
        table.unique(t, key2, keep=True)
    assert exc.value.args[0] == (
        "'keep' should be one of 'first', 'last', 'none'")

    t1_m = operation_table_type(t1a, masked=True)
    t1_m['a'].mask[1] = True

    with pytest.raises(ValueError) as exc:
        t1_mu = table.unique(t1_m)
    assert exc.value.args[0] == (
        "cannot use columns with masked values as keys; "
        "remove column 'a' from keys and rerun unique()")

    t1_mu = table.unique(t1_m, silent=True)
    assert t1_mu.pformat() == [' a b c d ',
                               '--- --- --- ---',
                               ' 0 a 0.0 4',
                               ' 2 b 7.0 0',
                               ' -- c 3.0 5']

    with pytest.raises(ValueError) as e:
        t1_mu = table.unique(t1_m, silent=True, keys='a')

    t1_m = operation_table_type(t, masked=True)
    t1_m['a'].mask[1] = True
    t1_m['d'].mask[3] = True

    # Test that multiple masked key columns get removed in the correct order
    t1_mu = table.unique(t1_m, keys=['d', 'a', 'b'], silent=True)
    assert t1_mu.pformat() == [' a b c d ',
                               '--- --- --- ---',
                               ' 2 a 4.0 --',
                               ' 2 b 7.0 0',
                               ' -- c 3.0 5']


def test_vstack_bytes(operation_table_type):
    """
    Test for issue #5617 when vstack'ing bytes columns in Py3.
    This is really an upstream numpy issue, numpy/numpy#8403.
    """
    t = operation_table_type([[b'a']], names=['a'])
    assert t['a'].itemsize == 1

    t2 = table.vstack([t, t])
    assert len(t2) == 2
    assert t2['a'].itemsize == 1


def test_vstack_unicode():
    """
    Test for problem related to issue #5617 when vstack'ing *unicode*
    columns.  In this case the character size gets multiplied by 4.
    """
    t = table.Table([[u'a']], names=['a'])
    assert t['a'].itemsize == 4  # 4-byte / char for U dtype

    t2 = table.vstack([t, t])
    assert len(t2) == 2
    assert t2['a'].itemsize == 4


def test_get_out_class():
    c = table.Column([1, 2])
    mc = table.MaskedColumn([1, 2])
    q = [1, 2] * u.m

    assert _get_out_class([c, mc]) is mc.__class__
    assert _get_out_class([mc, c]) is mc.__class__
    assert _get_out_class([c, c]) is c.__class__
    assert _get_out_class([c]) is c.__class__

    with pytest.raises(ValueError):
        _get_out_class([c, q])

    with pytest.raises(ValueError):
        _get_out_class([q, c])


def test_masking_required_exception():
    """
    Test that outer join, hstack and vstack fail for a mixin column which
    does not support masking.
    """
    col = [1, 2, 3, 4] * u.m
    t1 = table.QTable([[1, 2, 3, 4], col], names=['a', 'b'])
    t2 = table.QTable([[1, 2], col[:2]], names=['a', 'c'])

    with pytest.raises(NotImplementedError) as err:
        table.vstack([t1, t2], join_type='outer')
    assert 'vstack requires masking' in str(err)

    with pytest.raises(NotImplementedError) as err:
        table.hstack([t1, t2], join_type='outer')
    assert 'hstack requires masking' in str(err)

    with pytest.raises(NotImplementedError) as err:
        table.join(t1, t2, join_type='outer')
    assert 'join requires masking' in str(err)
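# Illustrative sketch (not part of the original suite): a minimal example of
# the outer-join masking behavior exercised by the tests above, using only
# names already imported in this module.
def _example_outer_join_masking():
    """Outer join of plain Tables masks unmatched entries."""
    t1 = table.Table({'a': [1, 2], 'b': [10., 20.]})
    t2 = table.Table({'a': [2, 3], 'c': [200., 300.]})
    # Row a=1 has no 'c' value and row a=3 has no 'b' value; both get masked.
    return table.join(t1, t2, join_type='outer')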
1b0acdfda14eaa45237caed2f6332e51e48a90cda46c9563faf5effda08741b7
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst import operator import pytest import numpy as np from ...tests.helper import assert_follows_unicode_guidelines, catch_warnings from ... import table from ... import units as u class TestColumn(): def test_subclass(self, Column): c = Column(name='a') assert isinstance(c, np.ndarray) c2 = c * 2 assert isinstance(c2, Column) assert isinstance(c2, np.ndarray) def test_numpy_ops(self, Column): """Show that basic numpy operations with Column behave sensibly""" arr = np.array([1, 2, 3]) c = Column(arr, name='a') for op, test_equal in ((operator.eq, True), (operator.ne, False), (operator.ge, True), (operator.gt, False), (operator.le, True), (operator.lt, False)): for eq in (op(c, arr), op(arr, c)): assert np.all(eq) if test_equal else not np.any(eq) assert len(eq) == 3 if Column is table.Column: assert type(eq) == np.ndarray else: assert type(eq) == np.ma.core.MaskedArray assert eq.dtype.str == '|b1' lt = c - 1 < arr assert np.all(lt) def test_numpy_boolean_ufuncs(self, Column): """Show that basic numpy operations with Column behave sensibly""" arr = np.array([1, 2, 3]) c = Column(arr, name='a') for ufunc, test_true in ((np.isfinite, True), (np.isinf, False), (np.isnan, False), (np.sign, True), (np.signbit, False)): result = ufunc(c) assert len(result) == len(c) assert np.all(result) if test_true else not np.any(result) if Column is table.Column: assert type(result) == np.ndarray else: assert type(result) == np.ma.core.MaskedArray if ufunc is not np.sign: assert result.dtype.str == '|b1' def test_view(self, Column): c = np.array([1, 2, 3], dtype=np.int64).view(Column) assert repr(c) == "<{0} dtype='int64' length=3>\n1\n2\n3".format(Column.__name__) def test_format(self, Column): """Show that the formatted output from str() works""" from ... import conf with conf.set_temp('max_lines', 8): c1 = Column(np.arange(2000), name='a', dtype=float, format='%6.2f') assert str(c1).splitlines() == [' a ', '-------', ' 0.00', ' 1.00', ' ...', '1998.00', '1999.00', 'Length = 2000 rows'] def test_convert_numpy_array(self, Column): d = Column([1, 2, 3], name='a', dtype='i8') np_data = np.array(d) assert np.all(np_data == d) np_data = np.array(d, copy=False) assert np.all(np_data == d) np_data = np.array(d, dtype='i4') assert np.all(np_data == d) def test_convert_unit(self, Column): d = Column([1, 2, 3], name='a', dtype="f8", unit="m") d.convert_unit_to("km") assert np.all(d.data == [0.001, 0.002, 0.003]) def test_array_wrap(self): """Test that the __array_wrap__ method converts a reduction ufunc output that has a different shape into an ndarray view. Without this a method call like c.mean() returns a Column array object with length=1.""" # Mean and sum for a 1-d float column c = table.Column(name='a', data=[1., 2., 3.]) assert np.allclose(c.mean(), 2.0) assert isinstance(c.mean(), (np.floating, float)) assert np.allclose(c.sum(), 6.) 
assert isinstance(c.sum(), (np.floating, float)) # Non-reduction ufunc preserves Column class assert isinstance(np.cos(c), table.Column) # Sum for a 1-d int column c = table.Column(name='a', data=[1, 2, 3]) assert np.allclose(c.sum(), 6) assert isinstance(c.sum(), (np.integer, int)) # Sum for a 2-d int column c = table.Column(name='a', data=[[1, 2, 3], [4, 5, 6]]) assert c.sum() == 21 assert isinstance(c.sum(), (np.integer, int)) assert np.all(c.sum(axis=0) == [5, 7, 9]) assert c.sum(axis=0).shape == (3,) assert isinstance(c.sum(axis=0), np.ndarray) # Sum and mean for a 1-d masked column c = table.MaskedColumn(name='a', data=[1., 2., 3.], mask=[0, 0, 1]) assert np.allclose(c.mean(), 1.5) assert isinstance(c.mean(), (np.floating, float)) assert np.allclose(c.sum(), 3.) assert isinstance(c.sum(), (np.floating, float)) def test_name_none(self, Column): """Can create a column without supplying name, which defaults to None""" c = Column([1, 2]) assert c.name is None assert np.all(c == np.array([1, 2])) def test_quantity_init(self, Column): c = Column(data=np.array([1, 2, 3]) * u.m) assert np.all(c.data == np.array([1, 2, 3])) assert np.all(c.unit == u.m) c = Column(data=np.array([1, 2, 3]) * u.m, unit=u.cm) assert np.all(c.data == np.array([100, 200, 300])) assert np.all(c.unit == u.cm) def test_attrs_survive_getitem_after_change(self, Column): """ Test for issue #3023: when calling getitem with a MaskedArray subclass the original object attributes are not copied. """ c1 = Column([1, 2, 3], name='a', unit='m', format='%i', description='aa', meta={'a': 1}) c1.name = 'b' c1.unit = 'km' c1.format = '%d' c1.description = 'bb' c1.meta = {'bbb': 2} for item in (slice(None, None), slice(None, 1), np.array([0, 2]), np.array([False, True, False])): c2 = c1[item] assert c2.name == 'b' assert c2.unit is u.km assert c2.format == '%d' assert c2.description == 'bb' assert c2.meta == {'bbb': 2} # Make sure that calling getitem resulting in a scalar does # not copy attributes. val = c1[1] for attr in ('name', 'unit', 'format', 'description', 'meta'): assert not hasattr(val, attr) def test_to_quantity(self, Column): d = Column([1, 2, 3], name='a', dtype="f8", unit="m") assert np.all(d.quantity == ([1, 2, 3.] * u.m)) assert np.all(d.quantity.value == ([1, 2, 3.] 
* u.m).value)
        assert np.all(d.quantity == d.to('m'))
        assert np.all(d.quantity.value == d.to('m').value)

        np.testing.assert_allclose(d.to(u.km).value,
                                   ([.001, .002, .003] * u.km).value)
        np.testing.assert_allclose(d.to('km').value,
                                   ([.001, .002, .003] * u.km).value)

        np.testing.assert_allclose(
            d.to(u.MHz, u.equivalencies.spectral()).value,
            [299.792458, 149.896229, 99.93081933])

        d_nounit = Column([1, 2, 3], name='a', dtype="f8", unit=None)
        with pytest.raises(u.UnitsError):
            d_nounit.to(u.km)
        assert np.all(d_nounit.to(u.dimensionless_unscaled) == np.array([1, 2, 3]))

        # make sure the correct copy/no copy behavior is happening
        q = [1, 3, 5]*u.km

        # to should always make a copy
        d.to(u.km)[:] = q
        np.testing.assert_allclose(d, [1, 2, 3])

        # explicit copying of the quantity should not change the column
        d.quantity.copy()[:] = q
        np.testing.assert_allclose(d, [1, 2, 3])

        # but quantity directly is a "view", accessing the underlying column
        d.quantity[:] = q
        np.testing.assert_allclose(d, [1000, 3000, 5000])

        # view should also work for integers
        d2 = Column([1, 2, 3], name='a', dtype=int, unit="m")
        d2.quantity[:] = q
        np.testing.assert_allclose(d2, [1000, 3000, 5000])

        # but it should fail for strings or other non-numeric tables
        d3 = Column(['arg', 'name', 'stuff'], name='a', unit="m")
        with pytest.raises(TypeError):
            d3.quantity

    def test_item_access_type(self, Column):
        """
        Tests for #3095, which forces integer item access to always return a plain
        ndarray or MaskedArray, even in the case of a multi-dim column.
        """
        integer_types = (int, np.int_)

        for int_type in integer_types:
            c = Column([[1, 2], [3, 4]])
            i0 = int_type(0)
            i1 = int_type(1)
            assert np.all(c[i0] == [1, 2])
            assert type(c[i0]) == (np.ma.MaskedArray if hasattr(Column, 'mask')
                                   else np.ndarray)
            assert c[i0].shape == (2,)

            c01 = c[i0:i1]
            assert np.all(c01 == [[1, 2]])
            assert isinstance(c01, Column)
            assert c01.shape == (1, 2)

            c = Column([1, 2])
            assert np.all(c[i0] == 1)
            assert isinstance(c[i0], np.integer)
            assert c[i0].shape == ()

            c01 = c[i0:i1]
            assert np.all(c01 == [1])
            assert isinstance(c01, Column)
            assert c01.shape == (1,)

    def test_insert_basic(self, Column):
        c = Column([0, 1, 2], name='a', dtype=int, unit='mJy', format='%i',
                   description='test column', meta={'c': 8, 'd': 12})

        # Basic insert
        c1 = c.insert(1, 100)
        assert np.all(c1 == [0, 100, 1, 2])
        assert c1.attrs_equal(c)
        assert type(c) is type(c1)
        if hasattr(c1, 'mask'):
            assert c1.data.shape == c1.mask.shape

        c1 = c.insert(-1, 100)
        assert np.all(c1 == [0, 1, 100, 2])

        c1 = c.insert(3, 100)
        assert np.all(c1 == [0, 1, 2, 100])

        c1 = c.insert(-3, 100)
        assert np.all(c1 == [100, 0, 1, 2])

        c1 = c.insert(1, [100, 200, 300])
        if hasattr(c1, 'mask'):
            assert c1.data.shape == c1.mask.shape

        # Out of bounds index
        with pytest.raises((ValueError, IndexError)):
            c1 = c.insert(-4, 100)
        with pytest.raises((ValueError, IndexError)):
            c1 = c.insert(4, 100)

    def test_insert_axis(self, Column):
        """Insert with non-default axis kwarg"""
        c = Column([[1, 2], [3, 4]])

        c1 = c.insert(1, [5, 6], axis=None)
        assert np.all(c1 == [1, 5, 6, 2, 3, 4])

        c1 = c.insert(1, [5, 6], axis=1)
        assert np.all(c1 == [[1, 5, 2], [3, 6, 4]])

    def test_insert_multidim(self, Column):
        c = Column([[1, 2], [3, 4]], name='a', dtype=int)

        # Basic insert
        c1 = c.insert(1, [100, 200])
        assert np.all(c1 == [[1, 2], [100, 200], [3, 4]])

        # Broadcast
        c1 = c.insert(1, 100)
        assert np.all(c1 == [[1, 2], [100, 100], [3, 4]])

        # Wrong shape
        with pytest.raises(ValueError):
            c1 = c.insert(1, [100, 200, 300])

    def test_insert_object(self, Column):
        c = Column(['a', 1, None], name='a',
dtype=object)

        # Basic insert
        c1 = c.insert(1, [100, 200])
        assert np.all(c1 == ['a', [100, 200], 1, None])

    def test_insert_masked(self):
        c = table.MaskedColumn([0, 1, 2], name='a', mask=[False, True, False])

        # Basic insert
        c1 = c.insert(1, 100)
        assert np.all(c1.data.data == [0, 100, 1, 2])
        assert np.all(c1.data.mask == [False, False, True, False])
        assert type(c) is type(c1)

        for mask in (False, True):
            c1 = c.insert(1, 100, mask=mask)
            assert np.all(c1.data.data == [0, 100, 1, 2])
            assert np.all(c1.data.mask == [False, mask, True, False])

    def test_insert_masked_multidim(self):
        c = table.MaskedColumn([[1, 2], [3, 4]], name='a', dtype=int)

        c1 = c.insert(1, [100, 200], mask=True)
        assert np.all(c1.data.data == [[1, 2], [100, 200], [3, 4]])
        assert np.all(c1.data.mask == [[False, False], [True, True], [False, False]])

        c1 = c.insert(1, [100, 200], mask=[True, False])
        assert np.all(c1.data.data == [[1, 2], [100, 200], [3, 4]])
        assert np.all(c1.data.mask == [[False, False], [True, False], [False, False]])

        with pytest.raises(ValueError):
            c1 = c.insert(1, [100, 200], mask=[True, False, True])

    def test_mask_on_non_masked_table(self):
        """
        When the table is not masked, trying to set a mask on a column
        raises AttributeError.
        """
        t = table.Table([[1, 2], [3, 4]], names=('a', 'b'), dtype=('i4', 'f8'))
        with pytest.raises(AttributeError):
            t['a'].mask = [True, False]


class TestAttrEqual():
    """Bunch of tests originally from ATpy that test the attrs_equal method."""

    def test_5(self, Column):
        c1 = Column(name='a', dtype=int, unit='mJy')
        c2 = Column(name='a', dtype=int, unit='mJy')
        assert c1.attrs_equal(c2)

    def test_6(self, Column):
        c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
                    description='test column', meta={'c': 8, 'd': 12})
        c2 = Column(name='a', dtype=int, unit='mJy', format='%i',
                    description='test column', meta={'c': 8, 'd': 12})
        assert c1.attrs_equal(c2)

    def test_7(self, Column):
        c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
                    description='test column', meta={'c': 8, 'd': 12})
        c2 = Column(name='b', dtype=int, unit='mJy', format='%i',
                    description='test column', meta={'c': 8, 'd': 12})
        assert not c1.attrs_equal(c2)

    def test_8(self, Column):
        c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
                    description='test column', meta={'c': 8, 'd': 12})
        c2 = Column(name='a', dtype=float, unit='mJy', format='%i',
                    description='test column', meta={'c': 8, 'd': 12})
        assert not c1.attrs_equal(c2)

    def test_9(self, Column):
        c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
                    description='test column', meta={'c': 8, 'd': 12})
        c2 = Column(name='a', dtype=int, unit='erg.cm-2.s-1.Hz-1', format='%i',
                    description='test column', meta={'c': 8, 'd': 12})
        assert not c1.attrs_equal(c2)

    def test_10(self, Column):
        c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
                    description='test column', meta={'c': 8, 'd': 12})
        c2 = Column(name='a', dtype=int, unit='mJy', format='%g',
                    description='test column', meta={'c': 8, 'd': 12})
        assert not c1.attrs_equal(c2)

    def test_11(self, Column):
        c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
                    description='test column', meta={'c': 8, 'd': 12})
        c2 = Column(name='a', dtype=int, unit='mJy', format='%i',
                    description='another test column', meta={'c': 8, 'd': 12})
        assert not c1.attrs_equal(c2)

    def test_12(self, Column):
        c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
                    description='test column', meta={'c': 8, 'd': 12})
        c2 = Column(name='a', dtype=int, unit='mJy', format='%i',
                    description='test column', meta={'e': 8, 'd': 12})
        assert not c1.attrs_equal(c2)

    def
test_13(self, Column): c1 = Column(name='a', dtype=int, unit='mJy', format='%i', description='test column', meta={'c': 8, 'd': 12}) c2 = Column(name='a', dtype=int, unit='mJy', format='%i', description='test column', meta={'c': 9, 'd': 12}) assert not c1.attrs_equal(c2) def test_col_and_masked_col(self): c1 = table.Column(name='a', dtype=int, unit='mJy', format='%i', description='test column', meta={'c': 8, 'd': 12}) c2 = table.MaskedColumn(name='a', dtype=int, unit='mJy', format='%i', description='test column', meta={'c': 8, 'd': 12}) assert c1.attrs_equal(c2) assert c2.attrs_equal(c1) # Check that the meta descriptor is working as expected. The MetaBaseTest class # takes care of defining all the tests, and we simply have to define the class # and any minimal set of args to pass. from ...utils.tests.test_metadata import MetaBaseTest class TestMetaColumn(MetaBaseTest): test_class = table.Column args = () class TestMetaMaskedColumn(MetaBaseTest): test_class = table.MaskedColumn args = () def test_getitem_metadata_regression(): """ Regression test for #1471: MaskedArray does not call __array_finalize__ so the meta-data was not getting copied over. By overloading _update_from we are able to work around this bug. """ # Make sure that meta-data gets propagated with __getitem__ c = table.Column(data=[1, 2], name='a', description='b', unit='m', format="%i", meta={'c': 8}) assert c[1:2].name == 'a' assert c[1:2].description == 'b' assert c[1:2].unit == 'm' assert c[1:2].format == '%i' assert c[1:2].meta['c'] == 8 c = table.MaskedColumn(data=[1, 2], name='a', description='b', unit='m', format="%i", meta={'c': 8}) assert c[1:2].name == 'a' assert c[1:2].description == 'b' assert c[1:2].unit == 'm' assert c[1:2].format == '%i' assert c[1:2].meta['c'] == 8 # As above, but with take() - check the method and the function c = table.Column(data=[1, 2, 3], name='a', description='b', unit='m', format="%i", meta={'c': 8}) for subset in [c.take([0, 1]), np.take(c, [0, 1])]: assert subset.name == 'a' assert subset.description == 'b' assert subset.unit == 'm' assert subset.format == '%i' assert subset.meta['c'] == 8 # Metadata isn't copied for scalar values for subset in [c.take(0), np.take(c, 0)]: assert subset == 1 assert subset.shape == () assert not isinstance(subset, table.Column) c = table.MaskedColumn(data=[1, 2, 3], name='a', description='b', unit='m', format="%i", meta={'c': 8}) for subset in [c.take([0, 1]), np.take(c, [0, 1])]: assert subset.name == 'a' assert subset.description == 'b' assert subset.unit == 'm' assert subset.format == '%i' assert subset.meta['c'] == 8 # Metadata isn't copied for scalar values for subset in [c.take(0), np.take(c, 0)]: assert subset == 1 assert subset.shape == () assert not isinstance(subset, table.MaskedColumn) def test_unicode_guidelines(): arr = np.array([1, 2, 3]) c = table.Column(arr, name='a') assert_follows_unicode_guidelines(c) def test_scalar_column(): """ Column is not designed to hold scalars, but for numpy 1.6 this can happen: >> type(np.std(table.Column([1, 2]))) astropy.table.column.Column """ c = table.Column(1.5) assert repr(c) == '1.5' assert str(c) == '1.5' def test_qtable_column_conversion(): """ Ensures that a QTable that gets assigned a unit switches to be Quantity-y """ qtab = table.QTable([[1, 2], [3, 4.2]], names=['i', 'f']) assert isinstance(qtab['i'], table.column.Column) assert isinstance(qtab['f'], table.column.Column) qtab['i'].unit = 'km/s' assert isinstance(qtab['i'], u.Quantity) assert isinstance(qtab['f'], table.column.Column) # should 
follow from the above, but good to make sure as a #4497 regression test assert isinstance(qtab['i'][0], u.Quantity) assert isinstance(qtab[0]['i'], u.Quantity) assert not isinstance(qtab['f'][0], u.Quantity) assert not isinstance(qtab[0]['f'], u.Quantity) # Regression test for #5342: if a function unit is assigned, the column # should become the appropriate FunctionQuantity subclass. qtab['f'].unit = u.dex(u.cm/u.s**2) assert isinstance(qtab['f'], u.Dex) @pytest.mark.parametrize('masked', [True, False]) def test_string_truncation_warning(masked): """ Test warnings associated with in-place assignment to a string column that results in truncation of the right hand side. """ t = table.Table([['aa', 'bb']], names=['a'], masked=masked) with catch_warnings() as w: from inspect import currentframe, getframeinfo t['a'][1] = 'cc' assert len(w) == 0 t['a'][:] = 'dd' assert len(w) == 0 with catch_warnings() as w: frameinfo = getframeinfo(currentframe()) t['a'][0] = 'eee' # replace item with string that gets truncated assert t['a'][0] == 'ee' assert len(w) == 1 assert ('truncated right side string(s) longer than 2 character(s)' in str(w[0].message)) # Make sure the warning points back to the user code line assert w[0].lineno == frameinfo.lineno + 1 assert w[0].category is table.StringTruncateWarning assert 'test_column' in w[0].filename with catch_warnings() as w: t['a'][:] = ['ff', 'ggg'] # replace item with string that gets truncated assert np.all(t['a'] == ['ff', 'gg']) assert len(w) == 1 assert ('truncated right side string(s) longer than 2 character(s)' in str(w[0].message)) with catch_warnings() as w: # Test the obscure case of assigning from an array that was originally # wider than any of the current elements (i.e. dtype is U4 but actual # elements are U1 at the time of assignment). val = np.array(['ffff', 'gggg']) val[:] = ['f', 'g'] t['a'][:] = val assert np.all(t['a'] == ['f', 'g']) assert len(w) == 0 def test_string_truncation_warning_masked(): """ Test warnings associated with in-place assignment to a string to a masked column, specifically where the right hand side contains np.ma.masked. """ # Test for strings, but also cover assignment of np.ma.masked to # int and float masked column setting. This was previously only # covered in an unrelated io.ascii test (test_line_endings) which # showed an unexpected difference between handling of str and numeric # masked arrays. for values in (['a', 'b'], [1, 2], [1.0, 2.0]): mc = table.MaskedColumn(values) with catch_warnings() as w: mc[1] = np.ma.masked assert len(w) == 0 assert np.all(mc.mask == [False, True]) mc[:] = np.ma.masked assert len(w) == 0 assert np.all(mc.mask == [True, True]) mc = table.MaskedColumn(['aa', 'bb']) with catch_warnings() as w: mc[:] = [np.ma.masked, 'ggg'] # replace item with string that gets truncated assert mc[1] == 'gg' assert np.all(mc.mask == [True, False]) assert len(w) == 1 assert ('truncated right side string(s) longer than 2 character(s)' in str(w[0].message)) @pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn)) def test_col_unicode_sandwich_create_from_str(Column): """ Create a bytestring Column from strings (including unicode) in Py3. """ # a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding. # Stress the system by injecting non-ASCII characters. 
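    # dtype='S' requests a bytestring column, so the str values below are
    # encoded to bytes (utf-8) on the way in.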
uba = u'bä' c = Column([uba, 'def'], dtype='S') assert c.dtype.char == 'S' assert c[0] == uba assert isinstance(c[0], str) assert isinstance(c[:0], table.Column) assert np.all(c[:2] == np.array([uba, 'def'])) @pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn)) def test_col_unicode_sandwich_bytes(Column): """ Create a bytestring Column from bytes and ensure that it works in Python 3 in a convenient way like in Python 2. """ # a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding. # Stress the system by injecting non-ASCII characters. uba = u'bä' uba8 = uba.encode('utf-8') c = Column([uba8, b'def']) assert c.dtype.char == 'S' assert c[0] == uba assert isinstance(c[0], str) assert isinstance(c[:0], table.Column) assert np.all(c[:2] == np.array([uba, 'def'])) assert isinstance(c[:], table.Column) assert c[:].dtype.char == 'S' # Array / list comparisons assert np.all(c == [uba, 'def']) ok = c == [uba8, b'def'] assert type(ok) is type(c.data) assert ok.dtype.char == '?' assert np.all(ok) assert np.all(c == np.array([uba, u'def'])) assert np.all(c == np.array([uba8, b'def'])) # Scalar compare cmps = (uba, uba8) for cmp in cmps: ok = c == cmp assert type(ok) is type(c.data) assert np.all(ok == [True, False]) def test_col_unicode_sandwich_unicode(): """ Sanity check that Unicode Column behaves normally. """ # On Py2 the unicode must be ASCII-compatible, else the final test fails. uba = u'bä' uba8 = uba.encode('utf-8') c = table.Column([uba, 'def'], dtype='U') assert c[0] == uba assert isinstance(c[:0], table.Column) assert isinstance(c[0], str) assert np.all(c[:2] == np.array([uba, 'def'])) assert isinstance(c[:], table.Column) assert c[:].dtype.char == 'U' ok = c == [uba, 'def'] assert type(ok) == np.ndarray assert ok.dtype.char == '?' assert np.all(ok) assert np.all(c != [uba8, b'def']) def test_masked_col_unicode_sandwich(): """ Create a bytestring MaskedColumn and ensure that it works in Python 3 in a convenient way like in Python 2. """ c = table.MaskedColumn([b'abc', b'def']) c[1] = np.ma.masked assert isinstance(c[:0], table.MaskedColumn) assert isinstance(c[0], str) assert c[0] == 'abc' assert c[1] is np.ma.masked assert isinstance(c[:], table.MaskedColumn) assert c[:].dtype.char == 'S' ok = c == ['abc', 'def'] assert ok[0] == True assert ok[1] is np.ma.masked assert np.all(c == [b'abc', b'def']) assert np.all(c == np.array([u'abc', u'def'])) assert np.all(c == np.array([b'abc', b'def'])) for cmp in (u'abc', b'abc'): ok = c == cmp assert type(ok) is np.ma.MaskedArray assert ok[0] == True assert ok[1] is np.ma.masked @pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn)) def test_unicode_sandwich_set(Column): """ Test setting """ uba = u'bä' c = Column([b'abc', b'def']) c[0] = b'aa' assert np.all(c == [u'aa', u'def']) c[0] = uba # a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding assert np.all(c == [uba, u'def']) assert c.pformat() == [u'None', u'----', ' ' + uba, u' def'] c[:] = b'cc' assert np.all(c == [u'cc', u'cc']) c[:] = uba assert np.all(c == [uba, uba]) c[:] = '' c[:] = [uba, b'def'] assert np.all(c == [uba, b'def']) @pytest.mark.parametrize('class1', [table.MaskedColumn, table.Column]) @pytest.mark.parametrize('class2', [table.MaskedColumn, table.Column, str, list]) def test_unicode_sandwich_compare(class1, class2): """Test that comparing a bytestring Column/MaskedColumn with various str (unicode) object types gives the expected result. Tests #6838. 
""" obj1 = class1([b'a', b'c']) if class2 is str: obj2 = 'a' elif class2 is list: obj2 = ['a', 'b'] else: obj2 = class2(['a', 'b']) assert np.all((obj1 == obj2) == [True, False]) assert np.all((obj2 == obj1) == [True, False]) assert np.all((obj1 != obj2) == [False, True]) assert np.all((obj2 != obj1) == [False, True]) assert np.all((obj1 > obj2) == [False, True]) assert np.all((obj2 > obj1) == [False, False]) assert np.all((obj1 <= obj2) == [True, False]) assert np.all((obj2 <= obj1) == [True, True]) assert np.all((obj1 < obj2) == [False, False]) assert np.all((obj2 < obj1) == [False, True]) assert np.all((obj1 >= obj2) == [True, True]) assert np.all((obj2 >= obj1) == [True, False]) def test_unicode_sandwich_masked_compare(): """Test the fix for #6839 from #6899.""" c1 = table.MaskedColumn(['a', 'b', 'c', 'd'], mask=[True, False, True, False]) c2 = table.MaskedColumn([b'a', b'b', b'c', b'd'], mask=[True, True, False, False]) for cmp in ((c1 == c2), (c2 == c1)): assert cmp[0] is np.ma.masked assert cmp[1] is np.ma.masked assert cmp[2] is np.ma.masked assert cmp[3] for cmp in ((c1 != c2), (c2 != c1)): assert cmp[0] is np.ma.masked assert cmp[1] is np.ma.masked assert cmp[2] is np.ma.masked assert not cmp[3] # Note: comparisons <, >, >=, <= fail to return a masked array entirely, # see https://github.com/numpy/numpy/issues/10092.
da9927052930ba446948717a707ff65c5ddc50c7c519dc50ab4e1e315b46bae8
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import sys

import pytest
import numpy as np

from ... import table
from ...table import Row
from ... import units as u
from .conftest import MaskedTable


def test_masked_row_with_object_col():
    """
    Numpy < 1.8 has a bug in masked array that prevents accessing a row if
    there is a column with object type.
    """
    t = table.Table([[1]], dtype=['O'], masked=True)
    t['col0'].mask = False
    assert t[0]['col0'] == 1
    t['col0'].mask = True
    assert t[0]['col0'] is np.ma.masked


@pytest.mark.usefixtures('table_types')
class TestRow():
    def _setup(self, table_types):
        self._table_type = table_types.Table
        self._column_type = table_types.Column

    @property
    def t(self):
        # py.test wants to run this method once before table_types is run
        # to set Table and Column.  In this case just return None, which would
        # cause any downstream test to fail if this happened in any other context.
        if self._column_type is None:
            return None
        if not hasattr(self, '_t'):
            a = self._column_type(name='a', data=[1, 2, 3], dtype='i8')
            b = self._column_type(name='b', data=[4, 5, 6], dtype='i8')
            self._t = self._table_type([a, b])
        return self._t

    def test_subclass(self, table_types):
        """Row is subclass of ndarray and Row"""
        self._setup(table_types)
        c = Row(self.t, 2)
        assert isinstance(c, Row)

    def test_values(self, table_types):
        """Row accurately reflects table values and attributes"""
        self._setup(table_types)
        table = self.t
        row = table[1]
        assert row['a'] == 2
        assert row['b'] == 5
        assert row[0] == 2
        assert row[1] == 5
        assert row.meta is table.meta
        assert row.colnames == table.colnames
        assert row.columns is table.columns
        with pytest.raises(IndexError):
            row[2]
        if sys.byteorder == 'little':
            assert str(row.dtype) == "[('a', '<i8'), ('b', '<i8')]"
        else:
            assert str(row.dtype) == "[('a', '>i8'), ('b', '>i8')]"

    def test_ref(self, table_types):
        """Row is a reference into original table data"""
        self._setup(table_types)
        table = self.t
        row = table[1]
        row['a'] = 10
        if table_types.Table is not MaskedTable:
            assert table['a'][1] == 10

    def test_left_equal(self, table_types):
        """Compare a table row to the corresponding structured array row"""
        self._setup(table_types)
        np_t = self.t.as_array()
        if table_types.Table is MaskedTable:
            with pytest.raises(ValueError):
                self.t[0] == np_t[0]
        else:
            for row, np_row in zip(self.t, np_t):
                assert np.all(row == np_row)

    def test_left_not_equal(self, table_types):
        """Compare a table row to the corresponding structured array row"""
        self._setup(table_types)
        np_t = self.t.as_array()
        np_t['a'] = [0, 0, 0]
        if table_types.Table is MaskedTable:
            with pytest.raises(ValueError):
                self.t[0] == np_t[0]
        else:
            for row, np_row in zip(self.t, np_t):
                assert np.all(row != np_row)

    def test_right_equal(self, table_types):
        """Test right equal"""
        self._setup(table_types)
        np_t = self.t.as_array()
        if table_types.Table is MaskedTable:
            with pytest.raises(ValueError):
                self.t[0] == np_t[0]
        else:
            for row, np_row in zip(self.t, np_t):
                assert np.all(np_row == row)

    def test_convert_numpy_array(self, table_types):
        self._setup(table_types)
        d = self.t[1]

        np_data = np.array(d)
        if table_types.Table is not MaskedTable:
            assert np.all(np_data == d.as_void())
        assert np_data is not d.as_void()
        assert d.colnames == list(np_data.dtype.names)

        np_data = np.array(d, copy=False)
        if table_types.Table is not MaskedTable:
            assert np.all(np_data == d.as_void())
        assert np_data is not d.as_void()
        assert d.colnames == list(np_data.dtype.names)

        with pytest.raises(ValueError):
            np_data = np.array(d, dtype=[(str('c'), 'i8'), (str('d'),
'i8')]) def test_format_row(self, table_types): """Test formatting row""" self._setup(table_types) table = self.t row = table[0] assert repr(row).splitlines() == ['<{0} {1}{2}>' .format(row.__class__.__name__, 'index=0', ' masked=True' if table.masked else ''), ' a b ', 'int64 int64', '----- -----', ' 1 4'] assert str(row).splitlines() == [' a b ', '--- ---', ' 1 4'] assert row._repr_html_().splitlines() == ['<i>{0} {1}{2}</i>' .format(row.__class__.__name__, 'index=0', ' masked=True' if table.masked else ''), '<table id="table{0}">'.format(id(table)), '<thead><tr><th>a</th><th>b</th></tr></thead>', '<thead><tr><th>int64</th><th>int64</th></tr></thead>', '<tr><td>1</td><td>4</td></tr>', '</table>'] def test_as_void(self, table_types): """Test the as_void() method""" self._setup(table_types) table = self.t row = table[0] # If masked then with no masks, issue numpy/numpy#483 should come # into play. Make sure as_void() code is working. row_void = row.as_void() if table.masked: assert isinstance(row_void, np.ma.mvoid) else: assert isinstance(row_void, np.void) assert row_void['a'] == 1 assert row_void['b'] == 4 # Confirm row is a view of table but row_void is not. table['a'][0] = -100 assert row['a'] == -100 assert row_void['a'] == 1 # Make sure it works for a table that has masked elements if table.masked: table['a'].mask = True # row_void is not a view, need to re-make assert row_void['a'] == 1 row_void = row.as_void() # but row is a view assert row['a'] is np.ma.masked def test_row_and_as_void_with_objects(self, table_types): """Test the deprecated data property and as_void() method""" t = table_types.Table([[{'a': 1}, {'b': 2}]], names=('a',)) assert t[0][0] == {'a': 1} assert t[0]['a'] == {'a': 1} assert t[0].as_void()[0] == {'a': 1} assert t[0].as_void()['a'] == {'a': 1} def test_bounds_checking(self, table_types): """Row gives index error upon creation for out-of-bounds index""" self._setup(table_types) for ibad in (-5, -4, 3, 4): with pytest.raises(IndexError): self.t[ibad] def test_row_tuple_column_slice(): """ Test getting and setting a row using a tuple or list of column names """ t = table.QTable([[1, 2, 3] * u.m, [10., 20., 30.], [100., 200., 300.], ['x', 'y', 'z']], names=['a', 'b', 'c', 'd']) # Get a row for index=1 r1 = t[1] # Column slice with tuple of col names r1_abc = r1['a', 'b', 'c'] # Row object for these cols r1_abc_repr = ['<Row index=1>', ' a b c ', ' m ', 'float64 float64 float64', '------- ------- -------', ' 2.0 20.0 200.0'] assert repr(r1_abc).splitlines() == r1_abc_repr # Column slice with list of col names r1_abc = r1[['a', 'b', 'c']] assert repr(r1_abc).splitlines() == r1_abc_repr # Make sure setting on a tuple or slice updates parent table and row r1['c'] = 1000 r1['a', 'b'] = 1000 * u.cm, 100. assert r1['a'] == 10 * u.m assert r1['b'] == 100 assert t['a'][1] == 10 * u.m assert t['b'][1] == 100. assert t['c'][1] == 1000 # Same but using a list of column names instead of tuple r1[['a', 'b']] = 2000 * u.cm, 200. assert r1['a'] == 20 * u.m assert r1['b'] == 200 assert t['a'][1] == 20 * u.m assert t['b'][1] == 200. # Set column slice of column slice r1_abc['a', 'c'] = -1 * u.m, -10 assert t['a'][1] == -1 * u.m assert t['b'][1] == 200. assert t['c'][1] == -10. 
# Bad column name with pytest.raises(KeyError) as err: t[1]['a', 'not_there'] assert "KeyError: 'not_there'" in str(err) # Too many values with pytest.raises(ValueError) as err: t[1]['a', 'b'] = 1 * u.m, 2, 3 assert 'right hand side must be a sequence' in str(err) # Something without a length with pytest.raises(ValueError) as err: t[1]['a', 'b'] = 1 assert 'right hand side must be a sequence' in str(err) def test_row_tuple_column_slice_transaction(): """ Test that setting a row that fails part way through does not change the table at all. """ t = table.QTable([[10., 20., 30.], [1, 2, 3] * u.m], names=['a', 'b']) tc = t.copy() # First one succeeds but second fails. with pytest.raises(ValueError) as err: t[1]['a', 'b'] = (-1, -1 * u.s) # Bad unit assert "'s' (time) and 'm' (length) are not convertible" in str(err) assert t[1] == tc[1]
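# Illustrative sketch (not part of the original suite): the transactional row
# assignment checked above, in miniature, using only this module's imports.
def _example_row_assignment_transaction():
    """A multi-column row assignment that fails part way leaves the table unchanged."""
    t = table.QTable([[10., 20.], [1, 2] * u.m], names=['a', 'b'])
    try:
        t[0]['a', 'b'] = (-1., -1 * u.s)  # second value has an incompatible unit
    except ValueError:
        pass
    return t  # still holds the original values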
59d31f974be779429dcadb9150c63bcf914755b841e9d6097de45994ca299130
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst try: import h5py # pylint: disable=W0611 except ImportError: HAS_H5PY = False else: HAS_H5PY = True try: import yaml # pylint: disable=W0611 HAS_YAML = True except ImportError: HAS_YAML = False import copy import pickle from io import StringIO import pytest import numpy as np from ...coordinates import EarthLocation from ...table import Table, QTable, join, hstack, vstack, Column, NdarrayMixin from ... import time from ... import coordinates from ... import units as u from ..column import BaseColumn from .. import table_helpers from .conftest import MIXIN_COLS def test_attributes(mixin_cols): """ Required attributes for a column can be set. """ m = mixin_cols['m'] m.info.name = 'a' assert m.info.name == 'a' m.info.description = 'a' assert m.info.description == 'a' # Cannot set unit for these classes if isinstance(m, (u.Quantity, coordinates.SkyCoord, time.Time)): with pytest.raises(AttributeError): m.info.unit = u.m else: m.info.unit = u.m assert m.info.unit is u.m m.info.format = 'a' assert m.info.format == 'a' m.info.meta = {'a': 1} assert m.info.meta == {'a': 1} with pytest.raises(AttributeError): m.info.bad_attr = 1 with pytest.raises(AttributeError): m.info.bad_attr def check_mixin_type(table, table_col, in_col): # We check for QuantityInfo rather than just isinstance(col, u.Quantity) # since we want to treat EarthLocation as a mixin, even though it is # a Quantity subclass. if ((isinstance(in_col.info, u.QuantityInfo) and type(table) is not QTable) or isinstance(in_col, Column)): assert type(table_col) is table.ColumnClass else: assert type(table_col) is type(in_col) # Make sure in_col got copied and creating table did not touch it assert in_col.info.name is None def test_make_table(table_types, mixin_cols): """ Make a table with the columns in mixin_cols, which is an ordered dict of three cols: 'a' and 'b' are table_types.Column type, and 'm' is a mixin. """ t = table_types.Table(mixin_cols) check_mixin_type(t, t['m'], mixin_cols['m']) cols = list(mixin_cols.values()) t = table_types.Table(cols, names=('i', 'a', 'b', 'm')) check_mixin_type(t, t['m'], mixin_cols['m']) t = table_types.Table(cols) check_mixin_type(t, t['col3'], mixin_cols['m']) def test_io_ascii_write(): """ Test that table with mixin column can be written by io.ascii for every pure Python writer. No validation of the output is done, this just confirms no exceptions. """ from ...io.ascii.connect import _get_connectors_table t = QTable(MIXIN_COLS) for fmt in _get_connectors_table(): if fmt['Format'] == 'ascii.ecsv' and not HAS_YAML: continue if fmt['Write'] and '.fast_' not in fmt['Format']: out = StringIO() t.write(out, format=fmt['Format']) def test_votable_quantity_write(tmpdir): """ Test that table with Quantity mixin column can be round-tripped by io.votable. Note that FITS and HDF5 mixin support are tested (much more thoroughly) in their respective subpackage tests (io/fits/tests/test_connect.py and io/misc/tests/test_hdf5.py). """ t = QTable() t['a'] = u.Quantity([1, 2, 4], unit='Angstrom') filename = str(tmpdir.join('table-tmp')) t.write(filename, format='votable', overwrite=True) qt = QTable.read(filename, format='votable') assert isinstance(qt['a'], u.Quantity) assert qt['a'].unit == 'Angstrom' @pytest.mark.parametrize('table_types', (Table, QTable)) def test_io_time_write_fits(tmpdir, table_types): """ Test that table with Time mixin columns can be written by io.fits. Validation of the output is done. 
Test that io.fits writes a table containing Time mixin columns that can be partially round-tripped (metadata scale, location). """ t = table_types([[1,2], ['string', 'column']]) for scale in time.TIME_SCALES: t['a'+scale] = time.Time([[1,2],[3,4]], format='cxcsec', scale=scale, location=EarthLocation(-2446354, 4237210, 4077985, unit='m')) t['b'+scale] = time.Time(['1999-01-01T00:00:00.123456789', '2010-01-01T00:00:00'], format='isot', scale=scale) t['c'] = [3., 4.] filename = str(tmpdir.join('table-tmp')) # Show that FITS format succeeds t.write(filename, format='fits', overwrite=True) tm = table_types.read(filename, format='fits', astropy_native=True) for scale in time.TIME_SCALES: for ab in ('a', 'b'): name = ab + scale # Assert that the time columns are read as Time assert isinstance(tm[name], time.Time) # Assert that the scales round-trip assert tm[name].scale == t[name].scale # Assert that the format is jd assert tm[name].format == 'jd' # Assert that the location round-trips assert tm[name].location == t[name].location # Finally assert that the column data round-trips assert (tm[name] == t[name]).all() for name in ('col0', 'col1', 'c'): # Assert that the non-time columns are read as Column assert isinstance(tm[name], Column) # Assert that the non-time columns' data round-trips assert (tm[name] == t[name]).all() # Test for conversion of time data to its value, as defined by its format for scale in time.TIME_SCALES: for ab in ('a', 'b'): name = ab + scale t[name].info.serialize_method['fits'] = 'formatted_value' t.write(filename, format='fits', overwrite=True) tm = table_types.read(filename, format='fits') for scale in time.TIME_SCALES: for ab in ('a', 'b'): name = ab + scale assert not isinstance(tm[name], time.Time) assert (tm[name] == t[name].value).all() def test_votable_mixin_write_fail(mixin_cols): """ Test that table with mixin columns (excluding Quantity) cannot be written by io.votable. """ t = QTable(mixin_cols) # Only do this test if there are unsupported column types (i.e. anything besides # BaseColumn and Quantity class instances). unsupported_cols = t.columns.not_isinstance((BaseColumn, u.Quantity)) if not unsupported_cols: pytest.skip("no unsupported column types") out = StringIO() with pytest.raises(ValueError) as err: t.write(out, format='votable') assert 'cannot write table with mixin column(s)' in str(err.value) def test_join(table_types): """ Join tables with mixin cols. Use column "i" as proxy for what the result should be for each mixin. 
""" t1 = table_types.Table() t1['a'] = table_types.Column(['a', 'b', 'b', 'c']) t1['i'] = table_types.Column([0, 1, 2, 3]) for name, col in MIXIN_COLS.items(): t1[name] = col t2 = table_types.Table(t1) t2['a'] = ['b', 'c', 'a', 'd'] for name, col in MIXIN_COLS.items(): t1[name].info.description = name t2[name].info.description = name + '2' for join_type in ('inner', 'left'): t12 = join(t1, t2, keys='a', join_type=join_type) idx1 = t12['i_1'] idx2 = t12['i_2'] for name, col in MIXIN_COLS.items(): name1 = name + '_1' name2 = name + '_2' assert_table_name_col_equal(t12, name1, col[idx1]) assert_table_name_col_equal(t12, name2, col[idx2]) assert t12[name1].info.description == name assert t12[name2].info.description == name + '2' for join_type in ('outer', 'right'): with pytest.raises(NotImplementedError) as exc: t12 = join(t1, t2, keys='a', join_type=join_type) assert 'join requires masking column' in str(exc.value) with pytest.raises(ValueError) as exc: t12 = join(t1, t2, keys=['a', 'skycoord']) assert 'not allowed as a key column' in str(exc.value) # Join does work for a mixin which is a subclass of np.ndarray t12 = join(t1, t2, keys=['quantity']) assert np.all(t12['a_1'] == t1['a']) def test_hstack(table_types): """ Hstack tables with mixin cols. Use column "i" as proxy for what the result should be for each mixin. """ t1 = table_types.Table() t1['i'] = table_types.Column([0, 1, 2, 3]) for name, col in MIXIN_COLS.items(): t1[name] = col t1[name].info.description = name t1[name].info.meta = {'a': 1} for join_type in ('inner', 'outer'): for chop in (True, False): t2 = table_types.Table(t1) if chop: t2 = t2[:-1] if join_type == 'outer': with pytest.raises(NotImplementedError) as exc: t12 = hstack([t1, t2], join_type=join_type) assert 'hstack requires masking column' in str(exc.value) continue t12 = hstack([t1, t2], join_type=join_type) idx1 = t12['i_1'] idx2 = t12['i_2'] for name, col in MIXIN_COLS.items(): name1 = name + '_1' name2 = name + '_2' assert_table_name_col_equal(t12, name1, col[idx1]) assert_table_name_col_equal(t12, name2, col[idx2]) for attr in ('description', 'meta'): assert getattr(t1[name].info, attr) == getattr(t12[name1].info, attr) assert getattr(t2[name].info, attr) == getattr(t12[name2].info, attr) def assert_table_name_col_equal(t, name, col): """ Assert all(t[name] == col), with special handling for known mixin cols. """ if isinstance(col, coordinates.SkyCoord): assert np.all(t[name].ra == col.ra) assert np.all(t[name].dec == col.dec) elif isinstance(col, u.Quantity): if type(t) is QTable: assert np.all(t[name] == col) elif isinstance(col, table_helpers.ArrayWrapper): assert np.all(t[name].data == col.data) else: assert np.all(t[name] == col) def test_get_items(mixin_cols): """ Test that slicing / indexing table gives right values and col attrs inherit """ attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta') m = mixin_cols['m'] m.info.name = 'm' m.info.format = '{0}' m.info.description = 'd' m.info.meta = {'a': 1} t = QTable([m]) for item in ([1, 3], np.array([0, 2]), slice(1, 3)): t2 = t[item] m2 = m[item] assert_table_name_col_equal(t2, 'm', m[item]) for attr in attrs: assert getattr(t2['m'].info, attr) == getattr(m.info, attr) assert getattr(m2.info, attr) == getattr(m.info, attr) def test_info_preserved_pickle_copy_init(mixin_cols): """ Test copy, pickle, and init from class roundtrip preserve info. This tests not only the mixin classes but a regular column as well. 
""" def pickle_roundtrip(c): return pickle.loads(pickle.dumps(c)) def init_from_class(c): return c.__class__(c) attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta') for colname in ('i', 'm'): m = mixin_cols[colname] m.info.name = colname m.info.format = '{0}' m.info.description = 'd' m.info.meta = {'a': 1} for func in (copy.copy, copy.deepcopy, pickle_roundtrip, init_from_class): m2 = func(m) for attr in attrs: assert getattr(m2.info, attr) == getattr(m.info, attr) def test_add_column(mixin_cols): """ Test that adding a column preserves values and attributes """ attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta') m = mixin_cols['m'] assert m.info.name is None # Make sure adding column in various ways doesn't touch t = QTable([m], names=['a']) assert m.info.name is None t['new'] = m assert m.info.name is None m.info.name = 'm' m.info.format = '{0}' m.info.description = 'd' m.info.meta = {'a': 1} t = QTable([m]) # Add columns m2, m3, m4 by two different methods and test expected equality t['m2'] = m m.info.name = 'm3' t.add_columns([m], copy=True) m.info.name = 'm4' t.add_columns([m], copy=False) for name in ('m2', 'm3', 'm4'): assert_table_name_col_equal(t, name, m) for attr in attrs: if attr != 'name': assert getattr(t['m'].info, attr) == getattr(t[name].info, attr) # Also check that one can set using a scalar. s = m[0] if type(s) is type(m): # We're not going to worry about testing classes for which scalars # are a different class than the real array (and thus loose info, etc.) t['s'] = m[0] assert_table_name_col_equal(t, 's', m[0]) for attr in attrs: if attr != 'name': assert getattr(t['m'].info, attr) == getattr(t['s'].info, attr) # While we're add it, also check a length-1 table. t = QTable([m[1:2]], names=['m']) if type(s) is type(m): t['s'] = m[0] assert_table_name_col_equal(t, 's', m[0]) for attr in attrs: if attr != 'name': assert getattr(t['m'].info, attr) == getattr(t['s'].info, attr) def test_vstack(): """ Vstack tables with mixin cols. """ t1 = QTable(MIXIN_COLS) t2 = QTable(MIXIN_COLS) with pytest.raises(NotImplementedError): vstack([t1, t2]) def test_insert_row(mixin_cols): """ Test inserting a row, which only works for BaseColumn and Quantity """ t = QTable(mixin_cols) t['m'].info.description = 'd' if isinstance(t['m'], (u.Quantity, Column)): t.insert_row(1, t[-1]) assert t[1] == t[-1] assert t['m'].info.description == 'd' else: with pytest.raises(ValueError) as exc: t.insert_row(1, t[-1]) assert "Unable to insert row" in str(exc.value) def test_insert_row_bad_unit(): """ Insert a row into a QTable with the wrong unit """ t = QTable([[1] * u.m]) with pytest.raises(ValueError) as exc: t.insert_row(0, (2 * u.m / u.s,)) assert "'m / s' (speed) and 'm' (length) are not convertible" in str(exc.value) def test_convert_np_array(mixin_cols): """ Test that converting to numpy array creates an object dtype and that each instance in the array has the expected type. """ t = QTable(mixin_cols) ta = t.as_array() m = mixin_cols['m'] dtype_kind = m.dtype.kind if hasattr(m, 'dtype') else 'O' assert ta['m'].dtype.kind == dtype_kind def test_assignment_and_copy(): """ Test that assignment of an int, slice, and fancy index works. Along the way test that copying table works. 
""" for name in ('quantity', 'arraywrap'): m = MIXIN_COLS[name] t0 = QTable([m], names=['m']) for i0, i1 in ((1, 2), (slice(0, 2), slice(1, 3)), (np.array([1, 2]), np.array([2, 3]))): t = t0.copy() t['m'][i0] = m[i1] if name == 'arraywrap': assert np.all(t['m'].data[i0] == m.data[i1]) assert np.all(t0['m'].data[i0] == m.data[i0]) assert np.all(t0['m'].data[i0] != t['m'].data[i0]) else: assert np.all(t['m'][i0] == m[i1]) assert np.all(t0['m'][i0] == m[i0]) assert np.all(t0['m'][i0] != t['m'][i0]) def test_grouping(): """ Test grouping with mixin columns. Raises not yet implemented error. """ t = QTable(MIXIN_COLS) t['index'] = ['a', 'b', 'b', 'c'] with pytest.raises(NotImplementedError): t.group_by('index') def test_conversion_qtable_table(): """ Test that a table round trips from QTable => Table => QTable """ qt = QTable(MIXIN_COLS) names = qt.colnames for name in names: qt[name].info.description = name t = Table(qt) for name in names: assert t[name].info.description == name if name == 'quantity': assert np.all(t['quantity'] == qt['quantity'].value) assert np.all(t['quantity'].unit is qt['quantity'].unit) assert isinstance(t['quantity'], t.ColumnClass) else: assert_table_name_col_equal(t, name, qt[name]) qt2 = QTable(qt) for name in names: assert qt2[name].info.description == name assert_table_name_col_equal(qt2, name, qt[name]) def test_setitem_as_column_name(): """ Test for mixin-related regression described in #3321. """ t = Table() t['a'] = ['x', 'y'] t['b'] = 'b' # Previously was failing with KeyError assert np.all(t['a'] == ['x', 'y']) assert np.all(t['b'] == ['b', 'b']) def test_quantity_representation(): """ Test that table representation of quantities does not have unit """ t = QTable([[1, 2] * u.m]) assert t.pformat() == ['col0', ' m ', '----', ' 1.0', ' 2.0'] def test_skycoord_representation(): """ Test that skycoord representation works, both in the way that the values are output and in changing the frame representation. """ # With no unit we get "None" in the unit row c = coordinates.SkyCoord([0], [1], [0], representation='cartesian') t = Table([c]) assert t.pformat() == [' col0 ', 'None,None,None', '--------------', ' 0.0,1.0,0.0'] # Test that info works with a dynamically changed representation c = coordinates.SkyCoord([0], [1], [0], unit='m', representation='cartesian') t = Table([c]) assert t.pformat() == [' col0 ', ' m,m,m ', '-----------', '0.0,1.0,0.0'] t['col0'].representation = 'unitspherical' assert t.pformat() == [' col0 ', 'deg,deg ', '--------', '90.0,0.0'] t['col0'].representation = 'cylindrical' assert t.pformat() == [' col0 ', ' m,deg,m ', '------------', '1.0,90.0,0.0'] def test_ndarray_mixin(): """ Test directly adding a plain structured array into a table instead of the view as an NdarrayMixin. Once added as an NdarrayMixin then all the previous tests apply. """ a = np.array([(1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')], dtype='<i4,' + ('|U1')) b = np.array([(10, 'aa'), (20, 'bb'), (30, 'cc'), (40, 'dd')], dtype=[('x', 'i4'), ('y', ('U2'))]) c = np.rec.fromrecords([(100, 'raa'), (200, 'rbb'), (300, 'rcc'), (400, 'rdd')], names=['rx', 'ry']) d = np.arange(8).reshape(4, 2).view(NdarrayMixin) # Add one during initialization and the next as a new column. 
t = Table([a], names=['a']) t['b'] = b t['c'] = c t['d'] = d assert isinstance(t['a'], NdarrayMixin) assert t['a'][1][1] == a[1][1] assert t['a'][2][0] == a[2][0] assert t[1]['a'][1] == a[1][1] assert t[2]['a'][0] == a[2][0] assert isinstance(t['b'], NdarrayMixin) assert t['b'][1]['x'] == b[1]['x'] assert t['b'][1]['y'] == b[1]['y'] assert t[1]['b']['x'] == b[1]['x'] assert t[1]['b']['y'] == b[1]['y'] assert isinstance(t['c'], NdarrayMixin) assert t['c'][1]['rx'] == c[1]['rx'] assert t['c'][1]['ry'] == c[1]['ry'] assert t[1]['c']['rx'] == c[1]['rx'] assert t[1]['c']['ry'] == c[1]['ry'] assert isinstance(t['d'], NdarrayMixin) assert t['d'][1][0] == d[1][0] assert t['d'][1][1] == d[1][1] assert t[1]['d'][0] == d[1][0] assert t[1]['d'][1] == d[1][1] assert t.pformat() == [' a b c d [2] ', '-------- ---------- ------------ ------', "(1, 'a') (10, 'aa') (100, 'raa') 0 .. 1", "(2, 'b') (20, 'bb') (200, 'rbb') 2 .. 3", "(3, 'c') (30, 'cc') (300, 'rcc') 4 .. 5", "(4, 'd') (40, 'dd') (400, 'rdd') 6 .. 7"] def test_possible_string_format_functions(): """ The QuantityInfo info class for Quantity implements a possible_string_format_functions() method that overrides the standard pprint._possible_string_format_functions() function. Test this. """ t = QTable([[1, 2] * u.m]) t['col0'].info.format = '%.3f' assert t.pformat() == [' col0', ' m ', '-----', '1.000', '2.000'] t['col0'].info.format = 'hi {:.3f}' assert t.pformat() == [' col0 ', ' m ', '--------', 'hi 1.000', 'hi 2.000'] t['col0'].info.format = '.4f' assert t.pformat() == [' col0 ', ' m ', '------', '1.0000', '2.0000'] def test_rename_mixin_columns(mixin_cols): """ Rename a mixin column. """ t = QTable(mixin_cols) tc = t.copy() t.rename_column('m', 'mm') assert t.colnames == ['i', 'a', 'b', 'mm'] if isinstance(t['mm'], table_helpers.ArrayWrapper): assert np.all(t['mm'].data == tc['m'].data) elif isinstance(t['mm'], coordinates.SkyCoord): assert np.all(t['mm'].ra == tc['m'].ra) assert np.all(t['mm'].dec == tc['m'].dec) else: assert np.all(t['mm'] == tc['m'])
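
def test_quantity_mixin_conversion_sketch():
    """
    A minimal sketch, added for illustration, of the core mixin behavior
    the tests above rely on: QTable keeps a Quantity column as a genuine
    Quantity, while converting to Table downgrades it to a plain Column
    whose unit is preserved in ``info.unit``.
    """
    qt = QTable()
    qt['v'] = [1., 2.] * u.m / u.s
    assert isinstance(qt['v'], u.Quantity)

    t = Table(qt)
    assert not isinstance(t['v'], u.Quantity)
    assert isinstance(t['v'], Column)
    assert t['v'].unit == u.m / u.s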
7d9592c0438956ca8b697c96f3f52a1b2d66f54804543074c7266a8a5d0d0047
# This Python file uses the following encoding: utf-8 # Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest import numpy as np from ... import table from ...table import Table, QTable from ...table.table_helpers import simple_table from ... import units as u from ...utils import console BIG_WIDE_ARR = np.arange(2000, dtype=np.float64).reshape(100, 20) SMALL_ARR = np.arange(18, dtype=np.int64).reshape(6, 3) @pytest.mark.usefixtures('table_type') class TestMultiD(): def test_multidim(self, table_type): """Test printing with multidimensional column""" arr = [np.array([[1, 2], [10, 20]], dtype=np.int64), np.array([[3, 4], [30, 40]], dtype=np.int64), np.array([[5, 6], [50, 60]], dtype=np.int64)] t = table_type(arr) lines = t.pformat() assert lines == ['col0 [2] col1 [2] col2 [2]', '-------- -------- --------', ' 1 .. 2 3 .. 4 5 .. 6', '10 .. 20 30 .. 40 50 .. 60'] lines = t.pformat(html=True) assert lines == ['<table id="table{id}">'.format(id=id(t)), '<thead><tr><th>col0 [2]</th><th>col1 [2]</th><th>col2 [2]</th></tr></thead>', '<tr><td>1 .. 2</td><td>3 .. 4</td><td>5 .. 6</td></tr>', '<tr><td>10 .. 20</td><td>30 .. 40</td><td>50 .. 60</td></tr>', '</table>'] nbclass = table.conf.default_notebook_table_class assert t._repr_html_().splitlines() == [ '<i>{0} masked={1} length=2</i>'.format(table_type.__name__, t.masked), '<table id="table{id}" class="{nbclass}">'.format(id=id(t), nbclass=nbclass), '<thead><tr><th>col0 [2]</th><th>col1 [2]</th><th>col2 [2]</th></tr></thead>', '<thead><tr><th>int64</th><th>int64</th><th>int64</th></tr></thead>', '<tr><td>1 .. 2</td><td>3 .. 4</td><td>5 .. 6</td></tr>', '<tr><td>10 .. 20</td><td>30 .. 40</td><td>50 .. 60</td></tr>', '</table>'] t = table_type([arr]) lines = t.pformat() assert lines == ['col0 [2,2]', '----------', ' 1 .. 20', ' 3 .. 40', ' 5 .. 60'] def test_fake_multidim(self, table_type): """Test printing with 'fake' multidimensional column""" arr = [np.array([[(1,)], [(10,)]], dtype=np.int64), np.array([[(3,)], [(30,)]], dtype=np.int64), np.array([[(5,)], [(50,)]], dtype=np.int64)] t = table_type(arr) lines = t.pformat() assert lines == ['col0 [1,1] col1 [1,1] col2 [1,1]', '---------- ---------- ----------', ' 1 3 5', ' 10 30 50'] lines = t.pformat(html=True) assert lines == ['<table id="table{id}">'.format(id=id(t)), '<thead><tr><th>col0 [1,1]</th><th>col1 [1,1]</th><th>col2 [1,1]</th></tr></thead>', '<tr><td>1</td><td>3</td><td>5</td></tr>', '<tr><td>10</td><td>30</td><td>50</td></tr>', '</table>'] nbclass = table.conf.default_notebook_table_class assert t._repr_html_().splitlines() == [ '<i>{0} masked={1} length=2</i>'.format(table_type.__name__, t.masked), '<table id="table{id}" class="{nbclass}">'.format(id=id(t), nbclass=nbclass), '<thead><tr><th>col0 [1,1]</th><th>col1 [1,1]</th><th>col2 [1,1]</th></tr></thead>', '<thead><tr><th>int64</th><th>int64</th><th>int64</th></tr></thead>', '<tr><td>1</td><td>3</td><td>5</td></tr>', u'<tr><td>10</td><td>30</td><td>50</td></tr>', '</table>'] t = table_type([arr]) lines = t.pformat() assert lines == ['col0 [2,1,1]', '------------', ' 1 .. 10', ' 3 .. 30', ' 5 .. 
50'] def test_html_escaping(): t = table.Table([(str('<script>alert("gotcha");</script>'), 2, 3)]) nbclass = table.conf.default_notebook_table_class assert t._repr_html_().splitlines() == [ '<i>Table length=3</i>', '<table id="table{id}" class="{nbclass}">'.format(id=id(t), nbclass=nbclass), '<thead><tr><th>col0</th></tr></thead>', '<thead><tr><th>str33</th></tr></thead>', '<tr><td>&lt;script&gt;alert(&quot;gotcha&quot;);&lt;/script&gt;</td></tr>', '<tr><td>2</td></tr>', '<tr><td>3</td></tr>', '</table>'] @pytest.mark.usefixtures('table_type') class TestPprint(): def _setup(self, table_type): self.tb = table_type(BIG_WIDE_ARR) self.tb['col0'].format = 'e' self.tb['col1'].format = '.6f' self.tb['col0'].unit = 'km**2' self.tb['col19'].unit = 'kg s m**-2' self.ts = table_type(SMALL_ARR) def test_empty_table(self, table_type): t = table_type() lines = t.pformat() assert lines == ['<No columns>'] c = repr(t) assert c.splitlines() == ['<{0} masked={1} length=0>'.format(table_type.__name__, t.masked), '<No columns>'] def test_format0(self, table_type): """Try getting screen size but fail to defaults because testing doesn't have access to screen (fcntl.ioctl fails). """ self._setup(table_type) arr = np.arange(4000, dtype=np.float64).reshape(100, 40) lines = table_type(arr).pformat() nlines, width = console.terminal_size() assert len(lines) == nlines for line in lines[:-1]: # skip last "Length = .. rows" line assert width - 10 < len(line) <= width def test_format1(self, table_type): """Basic test of formatting, unit header row included""" self._setup(table_type) lines = self.tb.pformat(max_lines=8, max_width=40) assert lines == [' col0 col1 ... col19 ', ' km2 ... kg s / m2', '------------ ----------- ... ---------', '0.000000e+00 1.000000 ... 19.0', ' ... ... ... ...', '1.960000e+03 1961.000000 ... 1979.0', '1.980000e+03 1981.000000 ... 1999.0', 'Length = 100 rows'] def test_format2(self, table_type): """Basic test of formatting, unit header row excluded""" self._setup(table_type) lines = self.tb.pformat(max_lines=8, max_width=40, show_unit=False) assert lines == [' col0 col1 ... col19 ', '------------ ----------- ... ------', '0.000000e+00 1.000000 ... 19.0', '2.000000e+01 21.000000 ... 39.0', ' ... ... ... ...', '1.960000e+03 1961.000000 ... 1979.0', '1.980000e+03 1981.000000 ... 1999.0', 'Length = 100 rows'] def test_format3(self, table_type): """Include the unit header row""" self._setup(table_type) lines = self.tb.pformat(max_lines=8, max_width=40, show_unit=True) assert lines == [' col0 col1 ... col19 ', ' km2 ... kg s / m2', '------------ ----------- ... ---------', '0.000000e+00 1.000000 ... 19.0', ' ... ... ... ...', '1.960000e+03 1961.000000 ... 1979.0', '1.980000e+03 1981.000000 ... 1999.0', 'Length = 100 rows'] def test_format4(self, table_type): """Do not include the name header row""" self._setup(table_type) lines = self.tb.pformat(max_lines=8, max_width=40, show_name=False) assert lines == [' km2 ... kg s / m2', '------------ ----------- ... ---------', '0.000000e+00 1.000000 ... 19.0', '2.000000e+01 21.000000 ... 39.0', ' ... ... ... ...', '1.960000e+03 1961.000000 ... 1979.0', '1.980000e+03 1981.000000 ... 
1999.0', 'Length = 100 rows'] def test_noclip(self, table_type): """Basic table print""" self._setup(table_type) lines = self.ts.pformat(max_lines=-1, max_width=-1) assert lines == ['col0 col1 col2', '---- ---- ----', ' 0 1 2', ' 3 4 5', ' 6 7 8', ' 9 10 11', ' 12 13 14', ' 15 16 17'] def test_clip1(self, table_type): """max lines below hard limit of 8 """ self._setup(table_type) lines = self.ts.pformat(max_lines=3, max_width=-1) assert lines == ['col0 col1 col2', '---- ---- ----', ' 0 1 2', ' 3 4 5', ' 6 7 8', ' 9 10 11', ' 12 13 14', ' 15 16 17'] def test_clip2(self, table_type): """max lines below hard limit of 8 and output longer than 8 """ self._setup(table_type) lines = self.ts.pformat(max_lines=3, max_width=-1, show_unit=True, show_dtype=True) assert lines == [' col0 col1 col2', ' ', 'int64 int64 int64', '----- ----- -----', ' 0 1 2', ' ... ... ...', ' 15 16 17', 'Length = 6 rows'] def test_clip3(self, table_type): """Max lines below hard limit of 8 and max width below hard limit of 10 """ self._setup(table_type) lines = self.ts.pformat(max_lines=3, max_width=1, show_unit=True) assert lines == ['col0 ...', ' ...', '---- ...', ' 0 ...', ' ... ...', ' 12 ...', ' 15 ...', 'Length = 6 rows'] def test_clip4(self, table_type): """Test a range of max_lines""" self._setup(table_type) for max_lines in (0, 1, 4, 5, 6, 7, 8, 100, 101, 102, 103, 104, 130): lines = self.tb.pformat(max_lines=max_lines, show_unit=False) assert len(lines) == max(8, min(102, max_lines)) @pytest.mark.usefixtures('table_type') class TestFormat(): def test_column_format(self, table_type): t = table_type([[1, 2], [3, 4]], names=('a', 'b')) # default (format=None) assert str(t['a']) == ' a \n---\n 1\n 2' # just a plain format string t['a'].format = '5.2f' assert str(t['a']) == ' a \n-----\n 1.00\n 2.00' # Old-style that is almost new-style t['a'].format = '{ %4.2f }' assert str(t['a']) == ' a \n--------\n{ 1.00 }\n{ 2.00 }' # New-style that is almost old-style t['a'].format = '%{0:}' assert str(t['a']) == ' a \n---\n %1\n %2' # New-style with extra spaces t['a'].format = ' {0:05d} ' assert str(t['a']) == ' a \n-------\n 00001 \n 00002 ' # New-style has precedence t['a'].format = '%4.2f {0:}' assert str(t['a']) == ' a \n-------\n%4.2f 1\n%4.2f 2' # Invalid format spec with pytest.raises(ValueError): t['a'].format = 'fail' assert t['a'].format == '%4.2f {0:}' # format did not change def test_column_format_with_threshold(self, table_type): from ... import conf with conf.set_temp('max_lines', 8): t = table_type([np.arange(20)], names=['a']) t['a'].format = '%{0:}' assert str(t['a']).splitlines() == [' a ', '---', ' %0', ' %1', '...', '%18', '%19', 'Length = 20 rows'] t['a'].format = '{ %4.2f }' assert str(t['a']).splitlines() == [' a ', '---------', ' { 0.00 }', ' { 1.00 }', ' ...', '{ 18.00 }', '{ 19.00 }', 'Length = 20 rows'] def test_column_format_func(self, table_type): # run most of functions twice # 1) astropy.table.pprint._format_funcs gets populated # 2) astropy.table.pprint._format_funcs gets used t = table_type([[1., 2.], [3, 4]], names=('a', 'b')) # mathematical function t['a'].format = lambda x: str(x * 3.) assert str(t['a']) == ' a \n---\n3.0\n6.0' assert str(t['a']) == ' a \n---\n3.0\n6.0' def test_column_format_callable(self, table_type): # run most of functions twice # 1) astropy.table.pprint._format_funcs gets populated # 2) astropy.table.pprint._format_funcs gets used t = table_type([[1., 2.], [3, 4]], names=('a', 'b')) # mathematical function class format: def __call__(self, x): return str(x * 3.) 
t['a'].format = format() assert str(t['a']) == ' a \n---\n3.0\n6.0' assert str(t['a']) == ' a \n---\n3.0\n6.0' def test_column_format_func_wrong_number_args(self, table_type): t = table_type([[1., 2.], [3, 4]], names=('a', 'b')) # function that expects wrong number of arguments def func(a, b): pass with pytest.raises(ValueError): t['a'].format = func def test_column_format_func_multiD(self, table_type): arr = [np.array([[1, 2], [10, 20]])] t = table_type(arr, names=['a']) # mathematical function t['a'].format = lambda x: str(x * 3.) outstr = ' a [2] \n------------\n 3.0 .. 6.0\n30.0 .. 60.0' assert str(t['a']) == outstr assert str(t['a']) == outstr def test_column_format_func_not_str(self, table_type): t = table_type([[1., 2.], [3, 4]], names=('a', 'b')) # mathematical function with pytest.raises(ValueError): t['a'].format = lambda x: x * 3 def test_column_alignment(self, table_type): t = table_type([[1], [2], [3], [4]], names=('long title a', 'long title b', 'long title c', 'long title d')) t['long title a'].format = '<' t['long title b'].format = '^' t['long title c'].format = '>' t['long title d'].format = '0=' assert str(t['long title a']) == 'long title a\n------------\n1 ' assert str(t['long title b']) == 'long title b\n------------\n 2 ' assert str(t['long title c']) == 'long title c\n------------\n 3' assert str(t['long title d']) == 'long title d\n------------\n000000000004' class TestFormatWithMaskedElements(): def test_column_format(self): t = Table([[1, 2, 3], [3, 4, 5]], names=('a', 'b'), masked=True) t['a'].mask = [True, False, True] # default (format=None) assert str(t['a']) == ' a \n---\n --\n 2\n --' # just a plain format string t['a'].format = '5.2f' assert str(t['a']) == ' a \n-----\n --\n 2.00\n --' # Old-style that is almost new-style t['a'].format = '{ %4.2f }' assert str(t['a']) == ' a \n--------\n --\n{ 2.00 }\n --' # New-style that is almost old-style t['a'].format = '%{0:}' assert str(t['a']) == ' a \n---\n --\n %2\n --' # New-style with extra spaces t['a'].format = ' {0:05d} ' assert str(t['a']) == ' a \n-------\n --\n 00002 \n --' # New-style has precedence t['a'].format = '%4.2f {0:}' assert str(t['a']) == ' a \n-------\n --\n%4.2f 2\n --' def test_column_format_with_threshold(self, table_type): from ... import conf with conf.set_temp('max_lines', 8): t = table_type([np.arange(20)], names=['a']) t['a'].format = '%{0:}' t['a'].mask[0] = True t['a'].mask[-1] = True assert str(t['a']).splitlines() == [' a ', '---', ' --', ' %1', '...', '%18', ' --', 'Length = 20 rows'] t['a'].format = '{ %4.2f }' assert str(t['a']).splitlines() == [' a ', '---------', ' --', ' { 1.00 }', ' ...', '{ 18.00 }', ' --', 'Length = 20 rows'] def test_column_format_func(self): # run most of functions twice # 1) astropy.table.pprint._format_funcs gets populated # 2) astropy.table.pprint._format_funcs gets used t = Table([[1., 2., 3.], [3, 4, 5]], names=('a', 'b'), masked=True) t['a'].mask = [True, False, True] # mathematical function t['a'].format = lambda x: str(x * 3.) assert str(t['a']) == ' a \n---\n --\n6.0\n --' assert str(t['a']) == ' a \n---\n --\n6.0\n --' def test_column_format_func_with_special_masked(self): # run most of functions twice # 1) astropy.table.pprint._format_funcs gets populated # 2) astropy.table.pprint._format_funcs gets used t = Table([[1., 2., 3.], [3, 4, 5]], names=('a', 'b'), masked=True) t['a'].mask = [True, False, True] # mathematical function def format_func(x): if x is np.ma.masked: return '!!' else: return str(x * 3.) 
t['a'].format = format_func assert str(t['a']) == ' a \n---\n !!\n6.0\n !!' assert str(t['a']) == ' a \n---\n !!\n6.0\n !!' def test_column_format_callable(self): # run most of functions twice # 1) astropy.table.pprint._format_funcs gets populated # 2) astropy.table.pprint._format_funcs gets used t = Table([[1., 2., 3.], [3, 4, 5]], names=('a', 'b'), masked=True) t['a'].mask = [True, False, True] # mathematical function class format: def __call__(self, x): return str(x * 3.) t['a'].format = format() assert str(t['a']) == ' a \n---\n --\n6.0\n --' assert str(t['a']) == ' a \n---\n --\n6.0\n --' def test_column_format_func_wrong_number_args(self): t = Table([[1., 2.], [3, 4]], names=('a', 'b'), masked=True) t['a'].mask = [True, False] # function that expects wrong number of arguments def func(a, b): pass with pytest.raises(ValueError): t['a'].format = func # but if all are masked, it never gets called t['a'].mask = [True, True] assert str(t['a']) == ' a \n---\n --\n --' def test_column_format_func_multiD(self): arr = [np.array([[1, 2], [10, 20]])] t = Table(arr, names=['a'], masked=True) t['a'].mask[0, 1] = True t['a'].mask[1, 1] = True # mathematical function t['a'].format = lambda x: str(x * 3.) outstr = ' a [2] \n----------\n 3.0 .. --\n30.0 .. --' assert str(t['a']) == outstr assert str(t['a']) == outstr def test_pprint_npfloat32(): """ Test for #148, that np.float32 cannot by itself be formatted as float, but has to be converted to a python float. """ dat = np.array([1., 2.], dtype=np.float32) t = Table([dat], names=['a']) t['a'].format = '5.2f' assert str(t['a']) == ' a \n-----\n 1.00\n 2.00' def test_pprint_py3_bytes(): """ Test for #1346 and #4944. Make sure a bytestring (dtype=S<N>) in Python 3 is printed correctly (without the "b" prefix like b'string'). """ val = bytes('val', encoding='utf-8') blah = u'bläh'.encode('utf-8') dat = np.array([val, blah], dtype=[(str('col'), 'S10')]) t = table.Table(dat) assert t['col'].pformat() == ['col ', '----', ' val', u'bläh'] def test_pprint_nameless_col(): """Regression test for #2213, making sure a nameless column can be printed using None as the name. 
""" col = table.Column([1., 2.]) assert str(col).startswith('None') def test_html(): """Test HTML printing""" dat = np.array([1., 2.], dtype=np.float32) t = Table([dat], names=['a']) lines = t.pformat(html=True) assert lines == ['<table id="table{id}">'.format(id=id(t)), u'<thead><tr><th>a</th></tr></thead>', u'<tr><td>1.0</td></tr>', u'<tr><td>2.0</td></tr>', u'</table>'] lines = t.pformat(html=True, tableclass='table-striped') assert lines == [ '<table id="table{id}" class="table-striped">'.format(id=id(t)), u'<thead><tr><th>a</th></tr></thead>', u'<tr><td>1.0</td></tr>', u'<tr><td>2.0</td></tr>', u'</table>'] lines = t.pformat(html=True, tableclass=['table', 'table-striped']) assert lines == [ '<table id="table{id}" class="table table-striped">'.format(id=id(t)), u'<thead><tr><th>a</th></tr></thead>', u'<tr><td>1.0</td></tr>', u'<tr><td>2.0</td></tr>', u'</table>'] def test_align(): t = simple_table(2, kinds='iS') assert t.pformat() == [' a b ', '--- ---', ' 1 b', ' 2 c'] # Use column format attribute t['a'].format = '<' assert t.pformat() == [' a b ', '--- ---', '1 b', '2 c'] # Now override column format attribute with various combinations of align tpf = [' a b ', '--- ---', ' 1 b ', ' 2 c '] for align in ('^', ['^', '^'], ('^', '^')): assert tpf == t.pformat(align=align) assert t.pformat(align='<') == [' a b ', '--- ---', '1 b ', '2 c '] assert t.pformat(align='0=') == [' a b ', '--- ---', '001 00b', '002 00c'] assert t.pformat(align=['<', '^']) == [' a b ', '--- ---', '1 b ', '2 c '] # Now use fill characters. Stress the system using a fill # character that is the same as an align character. t = simple_table(2, kinds='iS') assert t.pformat(align='^^') == [' a b ', '--- ---', '^1^ ^b^', '^2^ ^c^'] assert t.pformat(align='^>') == [' a b ', '--- ---', '^^1 ^^b', '^^2 ^^c'] assert t.pformat(align='^<') == [' a b ', '--- ---', '1^^ b^^', '2^^ c^^'] # Complicated interaction (same as narrative docs example) t1 = Table([[1.0, 2.0], [1, 2]], names=['column1', 'column2']) t1['column1'].format = '#^.2f' assert t1.pformat() == ['column1 column2', '------- -------', '##1.00# 1', '##2.00# 2'] assert t1.pformat(align='!<') == ['column1 column2', '------- -------', '1.00!!! 1!!!!!!', '2.00!!! 2!!!!!!'] assert t1.pformat(align=[None, '!<']) == ['column1 column2', '------- -------', '##1.00# 1!!!!!!', '##2.00# 2!!!!!!'] # Zero fill t['a'].format = '+d' assert t.pformat(align='0=') == [' a b ', '--- ---', '+01 00b', '+02 00c'] with pytest.raises(ValueError): t.pformat(align=['fail']) with pytest.raises(TypeError): t.pformat(align=0) with pytest.raises(TypeError): t.pprint(align=0) # Make sure pprint() does not raise an exception t.pprint() with pytest.raises(ValueError): t.pprint(align=['<', '<', '<']) with pytest.raises(ValueError): t.pprint(align='x=') def test_auto_format_func(): """Test for #5802 (fix for #5800 where format_func key is not unique)""" t = Table([[1, 2] * u.m]) t['col0'].format = '%f' t.pformat() # Force caching of format function qt = QTable(t) qt.pformat() # Generates exception prior to #5802 def test_decode_replace(): """ Test printing a bytestring column with a value that fails decoding to utf-8 and gets replaced by U+FFFD. See https://docs.python.org/3/library/codecs.html#codecs.replace_errors """ t = Table([[b'Z\xf0']]) assert t.pformat() == [u'col0', u'----', u' Z\ufffd']
8b53974b9b1fef5899db55c99d3fcec3e98a8450cda7ed4a55363a3865fd26fd
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import pytest

from ..bst import BST


def get_tree(TreeType):
    b = TreeType([], [])
    for val in [5, 2, 9, 3, 4, 1, 6, 10, 8, 7]:
        b.add(val)
    return b


@pytest.fixture
def tree():
    return get_tree(BST)


r'''
      5
     / \
    2   9
   / \ / \
  1  3 6  10
      \  \
       4  8
         /
        7
'''


@pytest.fixture
def bst(tree):
    # Request the ``tree`` fixture instead of calling the fixture
    # function directly, which newer pytest versions disallow.
    return tree


def test_bst_add(bst):
    root = bst.root
    assert root.data == [5]
    assert root.left.data == [2]
    assert root.right.data == [9]
    assert root.left.left.data == [1]
    assert root.left.right.data == [3]
    assert root.right.left.data == [6]
    assert root.right.right.data == [10]
    assert root.left.right.right.data == [4]
    assert root.right.left.right.data == [8]
    assert root.right.left.right.left.data == [7]


def test_bst_dimensions(bst):
    assert bst.size == 10
    assert bst.height == 4


def test_bst_find(tree):
    bst = tree
    for i in range(1, 11):
        node = bst.find(i)
        assert node == [i]
    assert bst.find(0) == []
    assert bst.find(11) == []
    assert bst.find('1') == []


def test_bst_traverse(bst):
    preord = [5, 2, 1, 3, 4, 9, 6, 8, 7, 10]
    inord = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    postord = [1, 4, 3, 2, 7, 8, 6, 10, 9, 5]
    traversals = {}
    for order in ('preorder', 'inorder', 'postorder'):
        traversals[order] = [x.key for x in bst.traverse(order)]
    assert traversals['preorder'] == preord
    assert traversals['inorder'] == inord
    assert traversals['postorder'] == postord


def test_bst_remove(bst):
    order = (6, 9, 1, 3, 7, 2, 10, 5, 4, 8)
    vals = set(range(1, 11))
    for i, val in enumerate(order):
        assert bst.remove(val) is True
        assert bst.is_valid()
        assert set([x.key for x in bst.traverse('inorder')]) == \
            vals.difference(order[:i+1])
        assert bst.size == 10 - i - 1
        assert bst.remove(-val) is False


def test_bst_duplicate(bst):
    bst.add(10, 11)
    assert bst.find(10) == [10, 11]
    assert bst.remove(10, data=10) is True
    assert bst.find(10) == [11]
    with pytest.raises(ValueError):
        bst.remove(10, data=30)  # invalid data
    assert bst.remove(10) is True
    assert bst.remove(10) is False


def test_bst_range(tree):
    bst = tree
    lst = bst.range_nodes(4, 8)
    assert sorted(x.key for x in lst) == [4, 5, 6, 7, 8]
    lst = bst.range_nodes(10, 11)
    assert [x.key for x in lst] == [10]
    lst = bst.range_nodes(11, 20)
    assert len(lst) == 0
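
def test_bst_usage_sketch():
    """
    A minimal usage sketch, added for illustration, tying the diagram
    above to the BST API: values added in arbitrary order come back
    sorted from an inorder traversal.
    """
    b = BST([], [])
    for val in [5, 2, 9, 3, 4, 1, 6, 10, 8, 7]:
        b.add(val)
    assert [node.key for node in b.traverse('inorder')] == list(range(1, 11))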
c5321cdb3cad032187d4c0ff5fcb151da08044ed6df93dd099054453756efcd1
# Licensed under a 3-clause BSD style license - see LICENSE.rst import itertools import pytest import numpy as np from numpy.testing import assert_array_almost_equal_nulp, assert_allclose from ..convolve import convolve_fft from ...tests.helper import catch_warnings from ...utils.exceptions import AstropyUserWarning VALID_DTYPES = [] for dtype_array in ['>f4', '<f4', '>f8', '<f8']: for dtype_kernel in ['>f4', '<f4', '>f8', '<f8']: VALID_DTYPES.append((dtype_array, dtype_kernel)) BOUNDARY_OPTIONS = [None, 'fill', 'wrap'] NANTREATMENT_OPTIONS = ('interpolate', 'fill') """ What does convolution mean? We use the 'same size' assumption here (i.e., you expect an array of the exact same size as the one you put in) Convolving any array with a kernel that is [1] should result in the same array returned Working example array: [1, 2, 3, 4, 5] Convolved with [1] = [1, 2, 3, 4, 5] Convolved with [1, 1] = [1, 3, 5, 7, 9] THIS IS NOT CONSISTENT! Convolved with [1, 0] = [1, 2, 3, 4, 5] Convolved with [0, 1] = [0, 1, 2, 3, 4] """ # NOTE: use_numpy_fft is redundant if you don't have FFTW installed option_names = ('boundary', 'nan_treatment', 'normalize_kernel') options = list(itertools.product(BOUNDARY_OPTIONS, NANTREATMENT_OPTIONS, (True, False), )) option_names_preserve_nan = ('boundary', 'nan_treatment', 'normalize_kernel', 'preserve_nan') options_preserve_nan = list(itertools.product(BOUNDARY_OPTIONS, NANTREATMENT_OPTIONS, (True, False), (True, False))) class TestConvolve1D: @pytest.mark.parametrize(option_names, options) def test_unity_1_none(self, boundary, nan_treatment, normalize_kernel): ''' Test that a unit kernel with a single element returns the same array ''' x = np.array([1., 2., 3.], dtype='float64') y = np.array([1.], dtype='float64') z = convolve_fft(x, y, boundary=boundary, nan_treatment=nan_treatment, normalize_kernel=normalize_kernel) assert_array_almost_equal_nulp(z, x, 10) @pytest.mark.parametrize(option_names, options) def test_unity_3(self, boundary, nan_treatment, normalize_kernel): ''' Test that a unit kernel with three elements returns the same array (except when boundary is None). 
''' x = np.array([1., 2., 3.], dtype='float64') y = np.array([0., 1., 0.], dtype='float64') z = convolve_fft(x, y, boundary=boundary, nan_treatment=nan_treatment, normalize_kernel=normalize_kernel) assert_array_almost_equal_nulp(z, x, 10) @pytest.mark.parametrize(option_names, options) def test_uniform_3(self, boundary, nan_treatment, normalize_kernel): ''' Test that the different modes are producing the correct results using a uniform kernel with three elements ''' x = np.array([1., 0., 3.], dtype='float64') y = np.array([1., 1., 1.], dtype='float64') z = convolve_fft(x, y, boundary=boundary, nan_treatment=nan_treatment, normalize_kernel=normalize_kernel) answer_key = (boundary, nan_treatment, normalize_kernel) answer_dict = { 'sum_fill_zeros': np.array([1., 4., 3.], dtype='float64'), 'average_fill_zeros': np.array([1 / 3., 4 / 3., 1.], dtype='float64'), 'sum_wrap': np.array([4., 4., 4.], dtype='float64'), 'average_wrap': np.array([4 / 3., 4 / 3., 4 / 3.], dtype='float64'), } result_dict = { # boundary, nan_treatment, normalize_kernel ('fill', 'interpolate', True): answer_dict['average_fill_zeros'], ('wrap', 'interpolate', True): answer_dict['average_wrap'], ('fill', 'interpolate', False): answer_dict['sum_fill_zeros'], ('wrap', 'interpolate', False): answer_dict['sum_wrap'], } for k in list(result_dict.keys()): result_dict[(k[0], 'fill', k[2])] = result_dict[k] for k in list(result_dict.keys()): if k[0] == 'fill': result_dict[(None, k[1], k[2])] = result_dict[k] assert_array_almost_equal_nulp(z, result_dict[answer_key], 10) @pytest.mark.parametrize(option_names, options) def test_halfity_3(self, boundary, nan_treatment, normalize_kernel): ''' Test that the different modes are producing the correct results using a uniform, non-unity kernel with three elements ''' x = np.array([1., 0., 3.], dtype='float64') y = np.array([0.5, 0.5, 0.5], dtype='float64') z = convolve_fft(x, y, boundary=boundary, nan_treatment=nan_treatment, normalize_kernel=normalize_kernel) answer_dict = { 'sum': np.array([0.5, 2.0, 1.5], dtype='float64'), 'sum_zeros': np.array([0.5, 2., 1.5], dtype='float64'), 'sum_nozeros': np.array([0.5, 2., 1.5], dtype='float64'), 'average': np.array([1 / 3., 4 / 3., 1.], dtype='float64'), 'sum_wrap': np.array([2., 2., 2.], dtype='float64'), 'average_wrap': np.array([4 / 3., 4 / 3., 4 / 3.], dtype='float64'), 'average_zeros': np.array([1 / 3., 4 / 3., 1.], dtype='float64'), 'average_nozeros': np.array([0.5, 4 / 3., 1.5], dtype='float64'), } if normalize_kernel: answer_key = 'average' else: answer_key = 'sum' if boundary == 'wrap': answer_key += '_wrap' else: # average = average_zeros; sum = sum_zeros answer_key += '_zeros' assert_array_almost_equal_nulp(z, answer_dict[answer_key], 10) @pytest.mark.parametrize(option_names_preserve_nan, options_preserve_nan) def test_unity_3_withnan(self, boundary, nan_treatment, normalize_kernel, preserve_nan): ''' Test that a unit kernel with three elements returns the same array (except when boundary is None). This version includes a NaN value in the original array. 
''' x = np.array([1., np.nan, 3.], dtype='float64') y = np.array([0., 1., 0.], dtype='float64') z = convolve_fft(x, y, boundary=boundary, nan_treatment=nan_treatment, normalize_kernel=normalize_kernel, preserve_nan=preserve_nan) if preserve_nan: assert np.isnan(z[1]) z = np.nan_to_num(z) # for whatever reason, numpy's fft has very limited precision, and # the comparison fails unless you cast the float64 to a float16 if hasattr(np, 'float16'): assert_array_almost_equal_nulp( np.asarray(z, dtype=np.float16), np.array([1., 0., 3.], dtype=np.float16), 10) # ASSERT equality to better than 16 bit but worse than 32 bit precision assert_allclose(z, np.array([1., 0., 3.]), atol=1e-14) inputs = (np.array([1., np.nan, 3.], dtype='float64'), np.array([1., np.inf, 3.], dtype='float64')) outputs = (np.array([1., 0., 3.], dtype='float64'), np.array([1., 0., 3.], dtype='float64')) options_unity1withnan = list(itertools.product(BOUNDARY_OPTIONS, NANTREATMENT_OPTIONS, (True, False), (True, False), inputs, outputs)) @pytest.mark.parametrize(option_names_preserve_nan + ('inval', 'outval'), options_unity1withnan) def test_unity_1_withnan(self, boundary, nan_treatment, normalize_kernel, preserve_nan, inval, outval): ''' Test that a unit kernel with three elements returns the same array (except when boundary is None). This version includes a NaN value in the original array. ''' x = inval y = np.array([1.], dtype='float64') z = convolve_fft(x, y, boundary=boundary, nan_treatment=nan_treatment, normalize_kernel=normalize_kernel, preserve_nan=preserve_nan) if preserve_nan: assert np.isnan(z[1]) z = np.nan_to_num(z) # for whatever reason, numpy's fft has very limited precision, and # the comparison fails unless you cast the float64 to a float16 if hasattr(np, 'float16'): assert_array_almost_equal_nulp(np.asarray(z, dtype=np.float16), np.array([1., 0., 3.], dtype=np.float16), 10) assert_allclose(z, outval, atol=1e-14) @pytest.mark.parametrize(option_names_preserve_nan, options_preserve_nan) def test_uniform_3_withnan(self, boundary, nan_treatment, normalize_kernel, preserve_nan): ''' Test that the different modes are producing the correct results using a uniform kernel with three elements. This version includes a NaN value in the original array. 
''' x = np.array([1., np.nan, 3.], dtype='float64') y = np.array([1., 1., 1.], dtype='float64') # if nan_treatment and not normalize_kernel: # with pytest.raises(ValueError): # z = convolve_fft(x, y, boundary=boundary, # nan_treatment=nan_treatment, # normalize_kernel=normalize_kernel, # ignore_edge_zeros=ignore_edge_zeros) # return z = convolve_fft(x, y, boundary=boundary, nan_treatment=nan_treatment, normalize_kernel=normalize_kernel, preserve_nan=preserve_nan) if preserve_nan: assert np.isnan(z[1]) answer_dict = { 'sum': np.array([1., 4., 3.], dtype='float64'), 'sum_nozeros': np.array([1., 4., 3.], dtype='float64'), 'sum_zeros': np.array([1., 4., 3.], dtype='float64'), 'sum_nozeros_interpnan': np.array([1., 4., 3.], dtype='float64'), 'average': np.array([1., 2., 3.], dtype='float64'), 'sum_wrap': np.array([4., 4., 4.], dtype='float64'), 'average_wrap': np.array([4/3., 4/3., 4/3.], dtype='float64'), 'average_wrap_interpnan': np.array([2, 2, 2], dtype='float64'), 'average_nozeros': np.array([1/2., 4/3., 3/2.], dtype='float64'), # 'average_nozeros_interpnan': np.array([1 / 2., 4 / 3., 3 / 2.], dtype='float64'), 'average_nozeros_interpnan': np.array([1., 2., 3.], dtype='float64'), 'average_zeros': np.array([1 / 3., 4 / 3., 3 / 3.], dtype='float64'), 'average_zeros_interpnan': np.array([1 / 2., 4 / 2., 3 / 2.], dtype='float64'), } for key in list(answer_dict.keys()): if 'sum' in key: answer_dict[key+"_interpnan"] = answer_dict[key] * 3./2. if normalize_kernel: answer_key = 'average' else: answer_key = 'sum' if boundary == 'wrap': answer_key += '_wrap' else: # average = average_zeros; sum = sum_zeros answer_key += '_zeros' if nan_treatment == 'interpolate': answer_key += '_interpnan' posns = np.where(np.isfinite(z)) assert_array_almost_equal_nulp(z[posns], answer_dict[answer_key][posns], 10) def test_nan_fill(self): # Test masked array array = np.array([1., np.nan, 3.], dtype='float64') kernel = np.array([1, 1, 1]) masked_array = np.ma.masked_array(array, mask=[0, 1, 0]) result = convolve_fft(masked_array, kernel, boundary='fill', fill_value=np.nan) assert_allclose(result, [1, 2, 3], atol=1e-14) def test_masked_array(self): """ Check whether convolve_fft works with masked arrays. """ # Test masked array array = np.array([1., np.nan, 3.], dtype='float64') kernel = np.array([1, 1, 1]) masked_array = np.ma.masked_array(array, mask=[0, 1, 0]) result = convolve_fft(masked_array, kernel, boundary='fill', fill_value=np.nan) assert_allclose(result, [1, 2, 3], atol=1e-14) # Test masked kernel array = np.array([1., np.nan, 3.], dtype='float64') kernel = np.array([1, 1, 1]) masked_array = np.ma.masked_array(array, mask=[0, 1, 0]) result = convolve_fft(masked_array, kernel, boundary='fill', fill_value=np.nan) assert_allclose(result, [1, 2, 3], atol=1e-14) def test_normalize_function(self): """ Check if convolve_fft works when passing a normalize function. """ array = [1, 2, 3] kernel = [3, 3, 3] result = convolve_fft(array, kernel, normalize_kernel=np.max) assert_allclose(result, [3, 6, 5], atol=1e-14) @pytest.mark.parametrize(option_names, options) def test_normalization_is_respected(self, boundary, nan_treatment, normalize_kernel): """ Check that if normalize_kernel is False then the normalization tolerance is respected. """ array = np.array([1, 2, 3]) # A simple identity kernel to which a non-zero normalization is added. base_kernel = np.array([1.0]) # Use the same normalization error tolerance in all cases. normalization_rtol = 1e-4 # Add the error below to the kernel. 
norm_error = [normalization_rtol / 10, normalization_rtol * 10] for err in norm_error: kernel = base_kernel + err result = convolve_fft(array, kernel, normalize_kernel=normalize_kernel, nan_treatment=nan_treatment, normalization_zero_tol=normalization_rtol) if normalize_kernel: # Kernel has been normalized to 1. assert_allclose(result, array, atol=1e-14) else: # Kernel should not have been normalized... assert_allclose(result, array * kernel, atol=1e-14) class TestConvolve2D: @pytest.mark.parametrize(option_names, options) def test_unity_1x1_none(self, boundary, nan_treatment, normalize_kernel): ''' Test that a 1x1 unit kernel returns the same array ''' x = np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]], dtype='float64') y = np.array([[1.]], dtype='float64') z = convolve_fft(x, y, boundary=boundary, nan_treatment=nan_treatment, normalize_kernel=normalize_kernel) assert_array_almost_equal_nulp(z, x, 10) @pytest.mark.parametrize(option_names, options) def test_unity_3x3(self, boundary, nan_treatment, normalize_kernel): ''' Test that a 3x3 unit kernel returns the same array (except when boundary is None). ''' x = np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]], dtype='float64') y = np.array([[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]], dtype='float64') z = convolve_fft(x, y, boundary=boundary, nan_treatment=nan_treatment, normalize_kernel=normalize_kernel) assert_array_almost_equal_nulp(z, x, 10) @pytest.mark.parametrize(option_names, options) def test_uniform_3x3(self, boundary, nan_treatment, normalize_kernel): ''' Test that the different modes are producing the correct results using a 3x3 uniform kernel. ''' x = np.array([[0., 0., 3.], [1., 0., 0.], [0., 2., 0.]], dtype='float64') y = np.array([[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]], dtype='float64') z = convolve_fft(x, y, boundary=boundary, nan_treatment=nan_treatment, fill_value=np.nan if normalize_kernel else 0, normalize_kernel=normalize_kernel) w = np.array([[4., 6., 4.], [6., 9., 6.], [4., 6., 4.]], dtype='float64') answer_dict = { 'sum': np.array([[1., 4., 3.], [3., 6., 5.], [3., 3., 2.]], dtype='float64'), 'sum_wrap': np.array([[6., 6., 6.], [6., 6., 6.], [6., 6., 6.]], dtype='float64'), } answer_dict['average'] = answer_dict['sum'] / w answer_dict['average_wrap'] = answer_dict['sum_wrap'] / 9. answer_dict['average_withzeros'] = answer_dict['sum'] / 9. answer_dict['sum_withzeros'] = answer_dict['sum'] if normalize_kernel: answer_key = 'average' else: answer_key = 'sum' if boundary == 'wrap': answer_key += '_wrap' elif nan_treatment == 'fill': answer_key += '_withzeros' a = answer_dict[answer_key] # for reasons unknown, the Windows FFT returns an answer for the [0, 0] # component that is EXACTLY 10*np.spacing assert np.all(np.abs(z - a) <= np.spacing(np.where(z > a, z, a)) * 10) @pytest.mark.parametrize(option_names_preserve_nan, options_preserve_nan) def test_unity_3x3_withnan(self, boundary, nan_treatment, normalize_kernel, preserve_nan): ''' Test that a 3x3 unit kernel returns the same array (except when boundary is None). This version includes a NaN value in the original array. 
''' x = np.array([[1., 2., 3.], [4., np.nan, 6.], [7., 8., 9.]], dtype='float64') y = np.array([[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]], dtype='float64') z = convolve_fft(x, y, boundary=boundary, nan_treatment=nan_treatment, normalize_kernel=normalize_kernel, preserve_nan=preserve_nan) if preserve_nan: assert np.isnan(z[1, 1]) x = np.nan_to_num(z) z = np.nan_to_num(z) a = x a[1, 1] = 0 # for whatever reason, numpy's fft has very limited precision, and # the comparison fails unless you cast the float64 to a float16 if hasattr(np, 'float16'): assert_array_almost_equal_nulp(np.asarray(z, dtype=np.float16), np.asarray(a, dtype=np.float16), 10) assert_allclose(z, a, atol=1e-14) @pytest.mark.parametrize(option_names_preserve_nan, options_preserve_nan) def test_uniform_3x3_withnan(self, boundary, nan_treatment, normalize_kernel, preserve_nan): ''' Test that the different modes are producing the correct results using a 3x3 uniform kernel. This version includes a NaN value in the original array. ''' x = np.array([[0., 0., 3.], [1., np.nan, 0.], [0., 2., 0.]], dtype='float64') y = np.array([[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]], dtype='float64') # commented out: allow unnormalized nan-ignoring convolution # # kernel is not normalized, so this situation -> exception # if nan_treatment and not normalize_kernel: # with pytest.raises(ValueError): # z = convolve_fft(x, y, boundary=boundary, # nan_treatment=nan_treatment, # normalize_kernel=normalize_kernel, # ignore_edge_zeros=ignore_edge_zeros, # ) # return z = convolve_fft(x, y, boundary=boundary, nan_treatment=nan_treatment, fill_value=np.nan if normalize_kernel else 0, normalize_kernel=normalize_kernel, preserve_nan=preserve_nan) if preserve_nan: assert np.isnan(z[1, 1]) # weights w_n = np.array([[3., 5., 3.], [5., 8., 5.], [3., 5., 3.]], dtype='float64') w_z = np.array([[4., 6., 4.], [6., 9., 6.], [4., 6., 4.]], dtype='float64') answer_dict = { 'sum': np.array([[1., 4., 3.], [3., 6., 5.], [3., 3., 2.]], dtype='float64'), 'sum_wrap': np.array([[6., 6., 6.], [6., 6., 6.], [6., 6., 6.]], dtype='float64'), } answer_dict['average'] = answer_dict['sum'] / w_z answer_dict['average_interpnan'] = answer_dict['sum'] / w_n answer_dict['average_wrap_interpnan'] = answer_dict['sum_wrap'] / 8. answer_dict['average_wrap'] = answer_dict['sum_wrap'] / 9. answer_dict['average_withzeros'] = answer_dict['sum'] / 9. answer_dict['average_withzeros_interpnan'] = answer_dict['sum'] / 8. answer_dict['sum_withzeros'] = answer_dict['sum'] answer_dict['sum_interpnan'] = answer_dict['sum'] * 9/8. answer_dict['sum_withzeros_interpnan'] = answer_dict['sum'] answer_dict['sum_wrap_interpnan'] = answer_dict['sum_wrap'] * 9/8. 
if normalize_kernel: answer_key = 'average' else: answer_key = 'sum' if boundary == 'wrap': answer_key += '_wrap' elif nan_treatment == 'fill': answer_key += '_withzeros' if nan_treatment == 'interpolate': answer_key += '_interpnan' a = answer_dict[answer_key] # Skip the NaN at [1, 1] when preserve_nan=True posns = np.where(np.isfinite(z)) # for reasons unknown, the Windows FFT returns an answer for the [0, 0] # component that is EXACTLY 10*np.spacing assert np.all(np.abs(z - a)[posns] <= np.spacing(np.where(z > a, z, a))[posns] * 10) def test_big_fail(self): """ Test that convolve_fft raises an exception if a too-large array is passed in """ with pytest.raises((ValueError, MemoryError)): # while a good idea, this approach did not work; it actually writes to disk # arr = np.memmap('file.np', mode='w+', shape=(512, 512, 512), dtype=complex) # this just allocates the memory but never touches it; it's better: arr = np.empty([512, 512, 512], dtype=complex) # note 512**3 * 16 bytes = 2.0 GB convolve_fft(arr, arr) @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS) def test_non_normalized_kernel(self, boundary): x = np.array([[0., 0., 4.], [1., 2., 0.], [0., 3., 0.]], dtype='float') y = np.array([[1., -1., 1.], [-1., 0., -1.], [1., -1., 1.]], dtype='float') z = convolve_fft(x, y, boundary=boundary, nan_treatment='fill', normalize_kernel=False) if boundary in (None, 'fill'): assert_allclose(z, np.array([[1., -5., 2.], [1., 0., -3.], [-2., -1., -1.]], dtype='float'), atol=1e-14) elif boundary == 'wrap': assert_allclose(z, np.array([[0., -8., 6.], [5., 0., -4.], [2., 3., -4.]], dtype='float'), atol=1e-14) else: raise ValueError("Invalid boundary specification")
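
def test_sum_kernel_sketch():
    """
    A minimal sketch, added for illustration, of the 'same size'
    convention described in the module docstring: with an unnormalized
    [1, 1, 1] kernel, zero-fill boundaries and nan_treatment='fill',
    each output element is the sum over a three-element window.
    """
    x = np.array([1., 2., 3., 4., 5.])
    y = np.array([1., 1., 1.])
    z = convolve_fft(x, y, boundary='fill', nan_treatment='fill',
                     normalize_kernel=False)
    # zero-padded window sums: [0+1+2, 1+2+3, 2+3+4, 3+4+5, 4+5+0]
    assert_allclose(z, [3., 6., 9., 12., 9.], atol=1e-10)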
4a77be2b3e6ce61adf50dcc623a5a4170a5bc7727e7fd7b84e21bc49801ddecb
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest import numpy as np from ... import convolution as conv from ...tests.helper import pickle_protocol, check_pickling_recovery # noqa @pytest.mark.parametrize(("name", "args", "kwargs", "xfail"), [(conv.CustomKernel, [], {'array': np.random.rand(15)}, False), (conv.Gaussian1DKernel, [1.0], {'x_size': 5}, True), (conv.Gaussian2DKernel, [1.0], {'x_size': 5, 'y_size': 5}, True), ]) def test_simple_object(pickle_protocol, name, args, kwargs, xfail): # Tests easily instantiated objects if xfail: pytest.xfail() original = name(*args, **kwargs) check_pickling_recovery(original, pickle_protocol)
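
def test_custom_kernel_pickle_sketch():
    """
    A minimal sketch, added for illustration, of what
    ``check_pickling_recovery`` verifies for the non-xfailed case above:
    a CustomKernel survives a plain pickle round-trip.
    """
    import pickle

    original = conv.CustomKernel(array=np.ones(5))
    restored = pickle.loads(pickle.dumps(original))
    assert np.all(restored.array == original.array)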
d95b2ae72a9a0107dba9085a5ca1dba775c57ef78724cb66cd952f42c5876da3
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest import numpy as np from ..convolve import convolve, convolve_fft from ..kernels import Gaussian2DKernel from ...nddata import NDData def test_basic_nddata(): arr = np.zeros((11, 11)) arr[5, 5] = 1 ndd = NDData(arr) test_kernel = Gaussian2DKernel(1) result = convolve(ndd, test_kernel) x, y = np.mgrid[:11, :11] expected = result[5, 5] * np.exp(-0.5 * ((x - 5)**2 + (y - 5)**2)) np.testing.assert_allclose(result, expected, atol=1e-6) resultf = convolve_fft(ndd, test_kernel) np.testing.assert_allclose(resultf, expected, atol=1e-6) @pytest.mark.parametrize('convfunc', [lambda *args: convolve(*args, nan_treatment='interpolate', normalize_kernel=True), lambda *args: convolve_fft(*args, nan_treatment='interpolate', normalize_kernel=True)]) def test_masked_nddata(convfunc): arr = np.zeros((11, 11)) arr[4, 5] = arr[6, 5] = arr[5, 4] = arr[5, 6] = 0.2 arr[5, 5] = 1.5 ndd_base = NDData(arr) mask = arr < 0 # this is all False mask[5, 5] = True ndd_mask = NDData(arr, mask=mask) arrnan = arr.copy() arrnan[5, 5] = np.nan ndd_nan = NDData(arrnan) test_kernel = Gaussian2DKernel(1) result_base = convfunc(ndd_base, test_kernel) result_nan = convfunc(ndd_nan, test_kernel) result_mask = convfunc(ndd_mask, test_kernel) assert np.allclose(result_nan, result_mask) assert not np.allclose(result_base, result_mask) assert not np.allclose(result_base, result_nan) # check to make sure the mask run doesn't talk back to the initial array assert np.sum(np.isnan(ndd_base.data)) != np.sum(np.isnan(ndd_nan.data))
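
def test_mask_as_nan_sketch():
    """
    A minimal sketch, added for illustration, of the equivalence checked
    in ``test_masked_nddata`` above: a masked NDData pixel is treated
    like a NaN pixel, so nan_treatment='interpolate' replaces it with a
    finite value interpolated from its neighbors.
    """
    arr = np.zeros((5, 5))
    arr[2, 2] = 1.0
    mask = np.zeros_like(arr, dtype=bool)
    mask[2, 2] = True
    ndd = NDData(arr, mask=mask)

    result = convolve(ndd, Gaussian2DKernel(1),
                      nan_treatment='interpolate', normalize_kernel=True)
    # With the only nonzero pixel masked out, the interpolated result
    # is all zeros (and in particular contains no NaN).
    assert np.isfinite(result).all()
    np.testing.assert_allclose(result, 0.0, atol=1e-12)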
4fe5f1d85ec122e54d8e52dfd5364d0395aedf49c9b80181ecb4e6f8ced02ac5
# Licensed under a 3-clause BSD style license - see LICENSE.rst import itertools import pytest import numpy as np from numpy.testing import assert_almost_equal from ..convolve import convolve, convolve_fft from ..kernels import Gaussian2DKernel, Box2DKernel, Tophat2DKernel from ..kernels import Moffat2DKernel SHAPES_ODD = [[15, 15], [31, 31]] SHAPES_EVEN = [[8, 8], [16, 16], [32, 32]] WIDTHS = [2, 3, 4, 5] KERNELS = [] for shape in SHAPES_ODD: for width in WIDTHS: KERNELS.append(Gaussian2DKernel(width, x_size=shape[0], y_size=shape[1], mode='oversample', factor=10)) KERNELS.append(Box2DKernel(width, x_size=shape[0], y_size=shape[1], mode='oversample', factor=10)) KERNELS.append(Tophat2DKernel(width, x_size=shape[0], y_size=shape[1], mode='oversample', factor=10)) KERNELS.append(Moffat2DKernel(width, 2, x_size=shape[0], y_size=shape[1], mode='oversample', factor=10)) class Test2DConvolutions: @pytest.mark.parametrize('kernel', KERNELS) def test_centered_makekernel(self, kernel): """ Test smoothing of an image with a single positive pixel """ shape = kernel.array.shape x = np.zeros(shape) xslice = [slice(sh // 2, sh // 2 + 1) for sh in shape] x[xslice] = 1.0 c2 = convolve_fft(x, kernel, boundary='fill') c1 = convolve(x, kernel, boundary='fill') assert_almost_equal(c1, c2, decimal=12) @pytest.mark.parametrize('kernel', KERNELS) def test_random_makekernel(self, kernel): """ Test smoothing of an image made of random noise """ shape = kernel.array.shape x = np.random.randn(*shape) c2 = convolve_fft(x, kernel, boundary='fill') c1 = convolve(x, kernel, boundary='fill') # not clear why, but these differ by a couple ulps... assert_almost_equal(c1, c2, decimal=12) @pytest.mark.parametrize(('shape', 'width'), list(itertools.product(SHAPES_ODD, WIDTHS))) def test_uniform_smallkernel(self, shape, width): """ Test smoothing of an image with a single positive pixel Uses a simple, small kernel """ if width % 2 == 0: # convolve does not accept odd-shape kernels return kernel = np.ones([width, width]) x = np.zeros(shape) xslice = [slice(sh // 2, sh // 2 + 1) for sh in shape] x[xslice] = 1.0 c2 = convolve_fft(x, kernel, boundary='fill') c1 = convolve(x, kernel, boundary='fill') assert_almost_equal(c1, c2, decimal=12) @pytest.mark.parametrize(('shape', 'width'), list(itertools.product(SHAPES_ODD, [1, 3, 5]))) def test_smallkernel_Box2DKernel(self, shape, width): """ Test smoothing of an image with a single positive pixel Compares a small uniform kernel to the Box2DKernel """ kernel1 = np.ones([width, width]) / float(width) ** 2 kernel2 = Box2DKernel(width, mode='oversample', factor=10) x = np.zeros(shape) xslice = [slice(sh // 2, sh // 2 + 1) for sh in shape] x[xslice] = 1.0 c2 = convolve_fft(x, kernel2, boundary='fill') c1 = convolve_fft(x, kernel1, boundary='fill') assert_almost_equal(c1, c2, decimal=12) c2 = convolve(x, kernel2, boundary='fill') c1 = convolve(x, kernel1, boundary='fill') assert_almost_equal(c1, c2, decimal=12)
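
def test_delta_reproduces_kernel_sketch():
    """
    A minimal sketch, added for illustration, of why the delta-image
    tests above are a useful probe: convolving a centered unit impulse
    with a (re)normalized kernel returns the kernel array itself.
    """
    kernel = Gaussian2DKernel(2, x_size=15, y_size=15)
    x = np.zeros((15, 15))
    x[7, 7] = 1.0
    c = convolve(x, kernel, boundary='fill')  # normalize_kernel=True default
    assert_almost_equal(c, kernel.array / kernel.array.sum(), decimal=12)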
e23511e301fb07336c3f357cc365e53a9dccde48ee667864d939c6be25df0e69
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools

import pytest
import numpy as np
from numpy.testing import assert_almost_equal, assert_allclose

from ..convolve import convolve, convolve_fft
from ..kernels import (
    Gaussian1DKernel, Gaussian2DKernel, Box1DKernel, Box2DKernel,
    Trapezoid1DKernel, TrapezoidDisk2DKernel, MexicanHat1DKernel,
    Tophat2DKernel, MexicanHat2DKernel, AiryDisk2DKernel, Ring2DKernel,
    CustomKernel, Model1DKernel, Model2DKernel, Kernel1D, Kernel2D)
from ..utils import KernelSizeError
from ...modeling.models import Box2D, Gaussian1D, Gaussian2D
from ...utils.exceptions import AstropyDeprecationWarning
from ...tests.helper import catch_warnings

try:
    from scipy.ndimage import filters
    HAS_SCIPY = True
except ImportError:
    HAS_SCIPY = False

WIDTHS_ODD = [3, 5, 7, 9]
WIDTHS_EVEN = [2, 4, 8, 16]
MODES = ['center', 'linear_interp', 'oversample', 'integrate']
KERNEL_TYPES = [Gaussian1DKernel, Gaussian2DKernel,
                Box1DKernel, Box2DKernel,
                Trapezoid1DKernel, TrapezoidDisk2DKernel,
                MexicanHat1DKernel, Tophat2DKernel, AiryDisk2DKernel,
                Ring2DKernel]

NUMS = [1, 1., np.float32(1.), np.float64(1.)]

# Test data
delta_pulse_1D = np.zeros(81)
delta_pulse_1D[40] = 1

delta_pulse_2D = np.zeros((81, 81))
delta_pulse_2D[40, 40] = 1

random_data_1D = np.random.rand(61)
random_data_2D = np.random.rand(61, 61)


class TestKernels:
    """
    Test class for the built-in convolution kernels.
    """

    @pytest.mark.skipif('not HAS_SCIPY')
    @pytest.mark.parametrize(('width'), WIDTHS_ODD)
    def test_scipy_filter_gaussian(self, width):
        """
        Test GaussianKernel against SciPy ndimage gaussian filter.
        """
        gauss_kernel_1D = Gaussian1DKernel(width)
        gauss_kernel_1D.normalize()
        gauss_kernel_2D = Gaussian2DKernel(width)
        gauss_kernel_2D.normalize()

        astropy_1D = convolve(delta_pulse_1D, gauss_kernel_1D, boundary='fill')
        astropy_2D = convolve(delta_pulse_2D, gauss_kernel_2D, boundary='fill')

        scipy_1D = filters.gaussian_filter(delta_pulse_1D, width)
        scipy_2D = filters.gaussian_filter(delta_pulse_2D, width)

        assert_almost_equal(astropy_1D, scipy_1D, decimal=12)
        assert_almost_equal(astropy_2D, scipy_2D, decimal=12)

    @pytest.mark.skipif('not HAS_SCIPY')
    @pytest.mark.parametrize(('width'), WIDTHS_ODD)
    def test_scipy_filter_gaussian_laplace(self, width):
        """
        Test MexicanHat kernels against SciPy ndimage gaussian laplace filters.
        """
        mexican_kernel_1D = MexicanHat1DKernel(width)
        mexican_kernel_2D = MexicanHat2DKernel(width)

        astropy_1D = convolve(delta_pulse_1D, mexican_kernel_1D,
                              boundary='fill', normalize_kernel=False)
        astropy_2D = convolve(delta_pulse_2D, mexican_kernel_2D,
                              boundary='fill', normalize_kernel=False)

        with pytest.raises(Exception) as exc:
            astropy_1D = convolve(delta_pulse_1D, mexican_kernel_1D,
                                  boundary='fill', normalize_kernel=True)
        assert 'sum is close to zero' in exc.value.args[0]

        with pytest.raises(Exception) as exc:
            astropy_2D = convolve(delta_pulse_2D, mexican_kernel_2D,
                                  boundary='fill', normalize_kernel=True)
        assert 'sum is close to zero' in exc.value.args[0]

        # The Laplace of Gaussian filter is an inverted Mexican Hat
        # filter.
        scipy_1D = -filters.gaussian_laplace(delta_pulse_1D, width)
        scipy_2D = -filters.gaussian_laplace(delta_pulse_2D, width)

        # There is a slight deviation in the normalization. They differ by a
        # factor of ~1.0000284132604045. The reason is not known.
        assert_almost_equal(astropy_1D, scipy_1D, decimal=5)
        assert_almost_equal(astropy_2D, scipy_2D, decimal=5)

    @pytest.mark.parametrize(('kernel_type', 'width'),
                             list(itertools.product(KERNEL_TYPES, WIDTHS_ODD)))
    def test_delta_data(self, kernel_type, width):
        """
        Test smoothing of an image with a single positive pixel
        """
        if kernel_type == AiryDisk2DKernel and not HAS_SCIPY:
            pytest.skip("Omitting AiryDisk2DKernel, which requires SciPy")
        if not kernel_type == Ring2DKernel:
            kernel = kernel_type(width)
        else:
            kernel = kernel_type(width, width * 0.2)

        if kernel.dimension == 1:
            c1 = convolve_fft(delta_pulse_1D, kernel, boundary='fill',
                              normalize_kernel=False)
            c2 = convolve(delta_pulse_1D, kernel, boundary='fill',
                          normalize_kernel=False)
            assert_almost_equal(c1, c2, decimal=12)
        else:
            c1 = convolve_fft(delta_pulse_2D, kernel, boundary='fill',
                              normalize_kernel=False)
            c2 = convolve(delta_pulse_2D, kernel, boundary='fill',
                          normalize_kernel=False)
            assert_almost_equal(c1, c2, decimal=12)

    @pytest.mark.parametrize(('kernel_type', 'width'),
                             list(itertools.product(KERNEL_TYPES, WIDTHS_ODD)))
    def test_random_data(self, kernel_type, width):
        """
        Test smoothing of an image made of random noise
        """
        if kernel_type == AiryDisk2DKernel and not HAS_SCIPY:
            pytest.skip("Omitting AiryDisk2DKernel, which requires SciPy")
        if not kernel_type == Ring2DKernel:
            kernel = kernel_type(width)
        else:
            kernel = kernel_type(width, width * 0.2)

        if kernel.dimension == 1:
            c1 = convolve_fft(random_data_1D, kernel, boundary='fill',
                              normalize_kernel=False)
            c2 = convolve(random_data_1D, kernel, boundary='fill',
                          normalize_kernel=False)
            assert_almost_equal(c1, c2, decimal=12)
        else:
            c1 = convolve_fft(random_data_2D, kernel, boundary='fill',
                              normalize_kernel=False)
            c2 = convolve(random_data_2D, kernel, boundary='fill',
                          normalize_kernel=False)
            assert_almost_equal(c1, c2, decimal=12)

    @pytest.mark.parametrize(('width'), WIDTHS_ODD)
    def test_uniform_smallkernel(self, width):
        """
        Test smoothing of an image with a single positive pixel

        Instead of using kernel class, uses a simple, small kernel
        """
        kernel = np.ones([width, width])

        c2 = convolve_fft(delta_pulse_2D, kernel, boundary='fill')
        c1 = convolve(delta_pulse_2D, kernel, boundary='fill')
        assert_almost_equal(c1, c2, decimal=12)

    @pytest.mark.parametrize(('width'), WIDTHS_ODD)
    def test_smallkernel_vs_Box2DKernel(self, width):
        """
        Test smoothing of an image with a single positive pixel

        Compares a simple uniform kernel against the Box2DKernel
        """
        kernel1 = np.ones([width, width]) / width ** 2
        kernel2 = Box2DKernel(width)

        c2 = convolve_fft(delta_pulse_2D, kernel2, boundary='fill')
        c1 = convolve_fft(delta_pulse_2D, kernel1, boundary='fill')

        assert_almost_equal(c1, c2, decimal=12)

    def test_convolve_1D_kernels(self):
        """
        Check if convolving two kernels with each other works correctly.
        """
        gauss_1 = Gaussian1DKernel(3)
        gauss_2 = Gaussian1DKernel(4)
        test_gauss_3 = Gaussian1DKernel(5)

        gauss_3 = convolve(gauss_1, gauss_2)
        assert np.all(np.abs((gauss_3 - test_gauss_3).array) < 0.01)

    def test_convolve_2D_kernels(self):
        """
        Check if convolving two kernels with each other works correctly.
        """
        gauss_1 = Gaussian2DKernel(3)
        gauss_2 = Gaussian2DKernel(4)
        test_gauss_3 = Gaussian2DKernel(5)

        gauss_3 = convolve(gauss_1, gauss_2)
        assert np.all(np.abs((gauss_3 - test_gauss_3).array) < 0.01)

    @pytest.mark.parametrize(('number'), NUMS)
    def test_multiply_scalar(self, number):
        """
        Check if multiplying a kernel with a scalar works correctly.
        """
        gauss = Gaussian1DKernel(3)
        gauss_new = number * gauss
        assert_almost_equal(gauss_new.array, gauss.array * number, decimal=12)

    @pytest.mark.parametrize(('number'), NUMS)
    def test_multiply_scalar_type(self, number):
        """
        Check if multiplying a kernel with a scalar works correctly.
        """
        gauss = Gaussian1DKernel(3)
        gauss_new = number * gauss
        assert type(gauss_new) is Gaussian1DKernel

    @pytest.mark.parametrize(('number'), NUMS)
    def test_rmultiply_scalar_type(self, number):
        """
        Check if multiplying a kernel with a scalar works correctly.
        """
        gauss = Gaussian1DKernel(3)
        gauss_new = gauss * number
        assert type(gauss_new) is Gaussian1DKernel

    def test_multiply_kernel1d(self):
        """Test that multiplying two 1D kernels raises an exception."""
        gauss = Gaussian1DKernel(3)
        with pytest.raises(Exception):
            gauss * gauss

    def test_multiply_kernel2d(self):
        """Test that multiplying two 2D kernels raises an exception."""
        gauss = Gaussian2DKernel(3)
        with pytest.raises(Exception):
            gauss * gauss

    def test_multiply_kernel1d_kernel2d(self):
        """
        Test that multiplying a 1D kernel with a 2D kernel raises an
        exception.
        """
        with pytest.raises(Exception):
            Gaussian1DKernel(3) * Gaussian2DKernel(3)

    def test_add_kernel_scalar(self):
        """Test that adding a scalar to a kernel raises an exception."""
        with pytest.raises(Exception):
            Gaussian1DKernel(3) + 1

    def test_model_1D_kernel(self):
        """
        Check Model1DKernel against Gaussian1DKernel
        """
        stddev = 5.
        gauss = Gaussian1D(1. / np.sqrt(2 * np.pi * stddev**2), 0, stddev)
        model_gauss_kernel = Model1DKernel(gauss, x_size=21)
        gauss_kernel = Gaussian1DKernel(stddev, x_size=21)
        assert_almost_equal(model_gauss_kernel.array, gauss_kernel.array,
                            decimal=12)

    def test_model_2D_kernel(self):
        """
        Check Model2DKernel against Gaussian2DKernel
        """
        stddev = 5.
        gauss = Gaussian2D(1. / (2 * np.pi * stddev**2), 0, 0, stddev, stddev)
        model_gauss_kernel = Model2DKernel(gauss, x_size=21)
        gauss_kernel = Gaussian2DKernel(stddev, x_size=21)
        assert_almost_equal(model_gauss_kernel.array, gauss_kernel.array,
                            decimal=12)

    def test_custom_1D_kernel(self):
        """
        Check CustomKernel against Box1DKernel.
        """
        # Define one dimensional array:
        array = np.ones(5)
        custom = CustomKernel(array)
        custom.normalize()
        box = Box1DKernel(5)

        c2 = convolve(delta_pulse_1D, custom, boundary='fill')
        c1 = convolve(delta_pulse_1D, box, boundary='fill')
        assert_almost_equal(c1, c2, decimal=12)

    def test_custom_2D_kernel(self):
        """
        Check CustomKernel against Box2DKernel.
        """
        # Define two dimensional array:
        array = np.ones((5, 5))
        custom = CustomKernel(array)
        custom.normalize()
        box = Box2DKernel(5)

        c2 = convolve(delta_pulse_2D, custom, boundary='fill')
        c1 = convolve(delta_pulse_2D, box, boundary='fill')
        assert_almost_equal(c1, c2, decimal=12)

    def test_custom_1D_kernel_list(self):
        """
        Check if CustomKernel works with lists.
        """
        custom = CustomKernel([1, 1, 1, 1, 1])
        assert custom.is_bool is True

    def test_custom_2D_kernel_list(self):
        """
        Check if CustomKernel works with lists.
        """
        custom = CustomKernel([[1, 1, 1],
                               [1, 1, 1],
                               [1, 1, 1]])
        assert custom.is_bool is True

    def test_custom_1D_kernel_zerosum(self):
        """
        Check if CustomKernel works when the input array/list
        sums to zero.
        """
        array = [-2, -1, 0, 1, 2]

        custom = CustomKernel(array)
        custom.normalize()
        assert custom.truncation == 0.
        assert custom._kernel_sum == 0.

    def test_custom_2D_kernel_zerosum(self):
        """
        Check if CustomKernel works when the input array/list
        sums to zero.
        """
        array = [[0, -1, 0], [-1, 4, -1], [0, -1, 0]]

        custom = CustomKernel(array)
        custom.normalize()
        assert custom.truncation == 0.
        assert custom._kernel_sum == 0.

    def test_custom_kernel_odd_error(self):
        """
        Check if CustomKernel raises if the array size is even.
        """
        with pytest.raises(KernelSizeError):
            CustomKernel([1, 1, 1, 1])

    def test_add_1D_kernels(self):
        """
        Check if adding two 1D kernels works.
        """
        box_1 = Box1DKernel(5)
        box_2 = Box1DKernel(3)
        box_3 = Box1DKernel(1)
        box_sum_1 = box_1 + box_2 + box_3
        box_sum_2 = box_2 + box_3 + box_1
        box_sum_3 = box_3 + box_1 + box_2
        ref = [1/5., 1/5. + 1/3., 1 + 1/3. + 1/5., 1/5. + 1/3., 1/5.]
        assert_almost_equal(box_sum_1.array, ref, decimal=12)
        assert_almost_equal(box_sum_2.array, ref, decimal=12)
        assert_almost_equal(box_sum_3.array, ref, decimal=12)

        # Assert that the kernels haven't changed
        assert_almost_equal(box_1.array, [0.2, 0.2, 0.2, 0.2, 0.2], decimal=12)
        assert_almost_equal(box_2.array, [1/3., 1/3., 1/3.], decimal=12)
        assert_almost_equal(box_3.array, [1], decimal=12)

    def test_add_2D_kernels(self):
        """
        Check if adding two 2D kernels works.
        """
        box_1 = Box2DKernel(3)
        box_2 = Box2DKernel(1)
        box_sum_1 = box_1 + box_2
        box_sum_2 = box_2 + box_1
        ref = [[1 / 9., 1 / 9., 1 / 9.],
               [1 / 9., 1 + 1 / 9., 1 / 9.],
               [1 / 9., 1 / 9., 1 / 9.]]
        ref_1 = [[1 / 9., 1 / 9., 1 / 9.],
                 [1 / 9., 1 / 9., 1 / 9.],
                 [1 / 9., 1 / 9., 1 / 9.]]
        assert_almost_equal(box_2.array, [[1]], decimal=12)
        assert_almost_equal(box_1.array, ref_1, decimal=12)
        assert_almost_equal(box_sum_1.array, ref, decimal=12)
        assert_almost_equal(box_sum_2.array, ref, decimal=12)

    def test_Gaussian1DKernel_even_size(self):
        """
        Check if even size for GaussianKernel works.
        """
        gauss = Gaussian1DKernel(3, x_size=10)
        assert gauss.array.size == 10

    def test_Gaussian2DKernel_even_size(self):
        """
        Check if even size for GaussianKernel works.
        """
        gauss = Gaussian2DKernel(3, x_size=10, y_size=10)
        assert gauss.array.shape == (10, 10)

    # https://github.com/astropy/astropy/issues/3605
    def test_Gaussian2DKernel_rotated(self):
        with catch_warnings(AstropyDeprecationWarning) as w:
            Gaussian2DKernel(stddev=10)
        assert len(w) == 1

        gauss = Gaussian2DKernel(
            x_stddev=3, y_stddev=1.5, theta=0.7853981633974483,
            x_size=5, y_size=5)  # rotated 45 deg ccw
        ans = [[0.02267712, 0.02464785, 0.02029238, 0.01265463, 0.00597762],
               [0.02464785, 0.03164847, 0.03078144, 0.02267712, 0.01265463],
               [0.02029238, 0.03078144, 0.03536777, 0.03078144, 0.02029238],
               [0.01265463, 0.02267712, 0.03078144, 0.03164847, 0.02464785],
               [0.00597762, 0.01265463, 0.02029238, 0.02464785, 0.02267712]]
        assert_allclose(gauss, ans, rtol=0.001)  # Rough comparison at 0.1 %

    def test_normalize_peak(self):
        """
        Check if normalize works with peak mode.
        """
        custom = CustomKernel([1, 2, 3, 2, 1])
        custom.normalize(mode='peak')
        assert custom.array.max() == 1

    def test_check_kernel_attributes(self):
        """
        Check if kernel attributes are correct.
        """
        box = Box2DKernel(5)

        # Check truncation
        assert box.truncation == 0

        # Check model
        assert isinstance(box.model, Box2D)

        # Check center
        assert box.center == [2, 2]

        # Check normalization
        box.normalize()
        assert_almost_equal(box._kernel_sum, 1., decimal=12)

        # Check separability
        assert box.separable

    @pytest.mark.parametrize(('kernel_type', 'mode'),
                             list(itertools.product(KERNEL_TYPES, MODES)))
    def test_discretize_modes(self, kernel_type, mode):
        """
        Check if the different modes result in kernels that work with convolve.
        Use only small kernel width, to make the test pass quickly.
        """
        if kernel_type == AiryDisk2DKernel and not HAS_SCIPY:
            pytest.skip("Omitting AiryDisk2DKernel, which requires SciPy")
        if not kernel_type == Ring2DKernel:
            kernel = kernel_type(3)
        else:
            kernel = kernel_type(3, 3 * 0.2)

        if kernel.dimension == 1:
            c1 = convolve_fft(delta_pulse_1D, kernel, boundary='fill',
                              normalize_kernel=False)
            c2 = convolve(delta_pulse_1D, kernel, boundary='fill',
                          normalize_kernel=False)
            assert_almost_equal(c1, c2, decimal=12)
        else:
            c1 = convolve_fft(delta_pulse_2D, kernel, boundary='fill',
                              normalize_kernel=False)
            c2 = convolve(delta_pulse_2D, kernel, boundary='fill',
                          normalize_kernel=False)
            assert_almost_equal(c1, c2, decimal=12)

    @pytest.mark.parametrize(('width'), WIDTHS_EVEN)
    def test_box_kernels_even_size(self, width):
        """
        Check if Box kernels work properly with even sizes.
        """
        kernel_1D = Box1DKernel(width)
        assert kernel_1D.shape[0] % 2 != 0
        assert kernel_1D.array.sum() == 1.

        kernel_2D = Box2DKernel(width)
        assert np.all([_ % 2 != 0 for _ in kernel_2D.shape])
        assert kernel_2D.array.sum() == 1.

    def test_kernel_normalization(self):
        """
        Test that repeated normalizations do not change the kernel [#3747].
        """
        kernel = CustomKernel(np.ones(5))
        kernel.normalize()
        data = np.copy(kernel.array)

        kernel.normalize()
        assert_allclose(data, kernel.array)

        kernel.normalize()
        assert_allclose(data, kernel.array)

    def test_kernel_normalization_mode(self):
        """
        Test that an error is raised if mode is invalid.
        """
        with pytest.raises(ValueError):
            kernel = CustomKernel(np.ones(3))
            kernel.normalize(mode='invalid')

    def test_kernel1d_int_size(self):
        """
        Test that an error is raised if ``Kernel1D`` ``x_size`` is not
        an integer.
        """
        with pytest.raises(TypeError):
            Gaussian1DKernel(3, x_size=1.2)

    def test_kernel2d_int_xsize(self):
        """
        Test that an error is raised if ``Kernel2D`` ``x_size`` is not
        an integer.
        """
        with pytest.raises(TypeError):
            Gaussian2DKernel(3, x_size=1.2)

    def test_kernel2d_int_ysize(self):
        """
        Test that an error is raised if ``Kernel2D`` ``y_size`` is not
        an integer.
        """
        with pytest.raises(TypeError):
            Gaussian2DKernel(3, x_size=5, y_size=1.2)

    def test_kernel1d_initialization(self):
        """
        Test that an error is raised if an array or model is not
        specified for ``Kernel1D``.
        """
        with pytest.raises(TypeError):
            Kernel1D()

    def test_kernel2d_initialization(self):
        """
        Test that an error is raised if an array or model is not
        specified for ``Kernel2D``.
        """
        with pytest.raises(TypeError):
            Kernel2D()
8e47004cb750a65dd0f4cb602d4c03091d8cef8338fe5092646d10b01cfcc728
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools

import pytest
import numpy as np
from numpy.testing import assert_allclose

from ..utils import discretize_model
from ...modeling.functional_models import (
    Gaussian1D, Box1D, MexicanHat1D, Gaussian2D, Box2D, MexicanHat2D)
from ...modeling.tests.example_models import models_1D, models_2D
from ...modeling.tests.test_models import create_model

try:
    import scipy  # pylint: disable=W0611
    HAS_SCIPY = True
except ImportError:
    HAS_SCIPY = False


modes = ['center', 'linear_interp', 'oversample']
test_models_1D = [Gaussian1D, Box1D, MexicanHat1D]
test_models_2D = [Gaussian2D, Box2D, MexicanHat2D]


@pytest.mark.parametrize(('model_class', 'mode'),
                         list(itertools.product(test_models_1D, modes)))
def test_pixel_sum_1D(model_class, mode):
    """
    Test if the sum of all pixels corresponds nearly to the integral.
    """
    if model_class == Box1D and mode == "center":
        pytest.skip("Non integrating mode. Skip integral test.")
    parameters = models_1D[model_class]
    model = create_model(model_class, parameters)

    values = discretize_model(model, models_1D[model_class]['x_lim'], mode=mode)
    assert_allclose(values.sum(), models_1D[model_class]['integral'], atol=0.0001)


@pytest.mark.parametrize('mode', modes)
def test_gaussian_eval_1D(mode):
    """
    Discretize Gaussian with different modes and check
    if result is at least similar to Gaussian1D.eval().
    """
    model = Gaussian1D(1, 0, 20)
    x = np.arange(-100, 101)
    values = model(x)
    disc_values = discretize_model(model, (-100, 101), mode=mode)
    assert_allclose(values, disc_values, atol=0.001)


@pytest.mark.parametrize(('model_class', 'mode'),
                         list(itertools.product(test_models_2D, modes)))
def test_pixel_sum_2D(model_class, mode):
    """
    Test if the sum of all pixels corresponds nearly to the integral.
    """
    if model_class == Box2D and mode == "center":
        pytest.skip("Non integrating mode. Skip integral test.")

    parameters = models_2D[model_class]
    model = create_model(model_class, parameters)

    values = discretize_model(model, models_2D[model_class]['x_lim'],
                              models_2D[model_class]['y_lim'], mode=mode)
    assert_allclose(values.sum(), models_2D[model_class]['integral'],
                    atol=0.0001)


@pytest.mark.parametrize('mode', modes)
def test_gaussian_eval_2D(mode):
    """
    Discretize Gaussian with different modes and check
    if result is at least similar to Gaussian2D.eval()
    """
    model = Gaussian2D(0.01, 0, 0, 1, 1)

    x = np.arange(-2, 3)
    y = np.arange(-2, 3)

    x, y = np.meshgrid(x, y)

    values = model(x, y)
    disc_values = discretize_model(model, (-2, 3), (-2, 3), mode=mode)
    assert_allclose(values, disc_values, atol=1e-2)


@pytest.mark.skipif('not HAS_SCIPY')
def test_gaussian_eval_2D_integrate_mode():
    """
    Discretize Gaussian with integrate mode
    """
    model_list = [Gaussian2D(.01, 0, 0, 2, 2),
                  Gaussian2D(.01, 0, 0, 1, 2),
                  Gaussian2D(.01, 0, 0, 2, 1)]

    x = np.arange(-2, 3)
    y = np.arange(-2, 3)

    x, y = np.meshgrid(x, y)

    for model in model_list:
        values = model(x, y)
        disc_values = discretize_model(model, (-2, 3), (-2, 3),
                                       mode='integrate')
        assert_allclose(values, disc_values, atol=1e-2)


@pytest.mark.skipif('not HAS_SCIPY')
def test_subpixel_gauss_1D():
    """
    Test subpixel accuracy of the integrate mode with gaussian 1D model.
    """
    gauss_1D = Gaussian1D(1, 0, 0.1)
    values = discretize_model(gauss_1D, (-1, 2), mode='integrate', factor=100)
    assert_allclose(values.sum(), np.sqrt(2 * np.pi) * 0.1, atol=0.00001)


@pytest.mark.skipif('not HAS_SCIPY')
def test_subpixel_gauss_2D():
    """
    Test subpixel accuracy of the integrate mode with gaussian 2D model.
    """
    gauss_2D = Gaussian2D(1, 0, 0, 0.1, 0.1)
    values = discretize_model(gauss_2D, (-1, 2), (-1, 2),
                              mode='integrate', factor=100)
    assert_allclose(values.sum(), 2 * np.pi * 0.01, atol=0.00001)


def test_discretize_callable_1d():
    """
    Test discretize when a 1d function is passed.
    """
    def f(x):
        return x ** 2
    y = discretize_model(f, (-5, 6))
    assert_allclose(y, np.arange(-5, 6) ** 2)


def test_discretize_callable_2d():
    """
    Test discretize when a 2d function is passed.
    """
    def f(x, y):
        return x ** 2 + y ** 2
    actual = discretize_model(f, (-5, 6), (-5, 6))
    y, x = (np.indices((11, 11)) - 5)
    desired = x ** 2 + y ** 2
    assert_allclose(actual, desired)


def test_type_exception():
    """
    Test type exception.
    """
    with pytest.raises(TypeError) as exc:
        discretize_model(float(0), (-10, 11))
    assert exc.value.args[0] == 'Model must be callable.'


def test_dim_exception_1d():
    """
    Test dimension exception 1d.
    """
    def f(x):
        return x ** 2
    with pytest.raises(ValueError) as exc:
        discretize_model(f, (-10, 11), (-10, 11))
    assert exc.value.args[0] == "y range specified, but model is only 1-d."


def test_dim_exception_2d():
    """
    Test dimension exception 2d.
    """
    def f(x, y):
        return x ** 2 + y ** 2
    with pytest.raises(ValueError) as exc:
        discretize_model(f, (-10, 11))
    assert exc.value.args[0] == "y range not specified, but model is 2-d"


def test_float_x_range_exception():
    def f(x, y):
        return x ** 2 + y ** 2
    with pytest.raises(ValueError) as exc:
        discretize_model(f, (-10.002, 11.23))
    assert exc.value.args[0] == ("The difference between the upper an lower"
                                 " limit of 'x_range' must be a whole number.")


def test_float_y_range_exception():
    def f(x, y):
        return x ** 2 + y ** 2
    with pytest.raises(ValueError) as exc:
        discretize_model(f, (-10, 11), (-10.002, 11.23))
    assert exc.value.args[0] == ("The difference between the upper an lower"
                                 " limit of 'y_range' must be a whole number.")
d6f78ac6f3c1e0eb5e080517cf4a50027dd44a79cf6b4f6674ebc5a9f8156988
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import pytest
import numpy as np

from ..convolve import convolve, convolve_fft

from numpy.testing import (assert_array_almost_equal_nulp,
                           assert_array_almost_equal)

import itertools

VALID_DTYPES = []
for dtype_array in ['>f4', '<f4', '>f8', '<f8']:
    for dtype_kernel in ['>f4', '<f4', '>f8', '<f8']:
        VALID_DTYPES.append((dtype_array, dtype_kernel))

BOUNDARY_OPTIONS = [None, 'fill', 'wrap', 'extend']
NANHANDLING_OPTIONS = ['interpolate', 'fill']
NORMALIZE_OPTIONS = [True, False]
PRESERVE_NAN_OPTIONS = [True, False]

BOUNDARIES_AND_CONVOLUTIONS = (list(zip(itertools.cycle((convolve,)),
                                        BOUNDARY_OPTIONS)) +
                               [(convolve_fft, 'wrap'),
                                (convolve_fft, 'fill')])

HAS_SCIPY = True
try:
    import scipy
except ImportError:
    HAS_SCIPY = False


class TestConvolve1D:

    def test_list(self):
        """
        Test that convolve works correctly when inputs are lists
        """
        x = [1, 4, 5, 6, 5, 7, 8]
        y = [0.2, 0.6, 0.2]
        z = convolve(x, y, boundary=None)
        assert_array_almost_equal_nulp(
            z, np.array([0., 3.6, 5., 5.6, 5.6, 6.8, 0.]), 10)

    def test_input_unmodified(self):
        """
        Test that convolve does not modify the input array in place
        """
        inlist = [1, 4, 5, 6, 5, 7, 8]
        x = np.array(inlist)
        y = [0.2, 0.6, 0.2]
        z = convolve(x, y, boundary=None)
        assert np.all(np.array(inlist) == x)

    @pytest.mark.parametrize(('dtype_array', 'dtype_kernel'), VALID_DTYPES)
    def test_dtype(self, dtype_array, dtype_kernel):
        '''
        Test that 32- and 64-bit floats are correctly handled
        '''
        x = np.array([1., 2., 3.], dtype=dtype_array)
        y = np.array([0., 1., 0.], dtype=dtype_kernel)
        z = convolve(x, y)
        assert x.dtype == z.dtype

    @pytest.mark.parametrize(('convfunc', 'boundary',),
                             BOUNDARIES_AND_CONVOLUTIONS)
    def test_unity_1_none(self, boundary, convfunc):
        '''
        Test that a unit kernel with a single element returns the same array
        '''
        x = np.array([1., 2., 3.], dtype='>f8')
        y = np.array([1.], dtype='>f8')

        z = convfunc(x, y, boundary=boundary)

        np.testing.assert_allclose(z, x)

    @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
    def test_unity_3(self, boundary):
        '''
        Test that a unit kernel with three elements returns the same array
        (except when boundary is None).
        '''
        x = np.array([1., 2., 3.], dtype='>f8')
        y = np.array([0., 1., 0.], dtype='>f8')

        z = convolve(x, y, boundary=boundary)

        if boundary is None:
            assert np.all(z == np.array([0., 2., 0.], dtype='>f8'))
        else:
            assert np.all(z == x)

    @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
    def test_uniform_3(self, boundary):
        '''
        Test that the different modes are producing the correct results using
        a uniform kernel with three elements
        '''
        x = np.array([1., 0., 3.], dtype='>f8')
        y = np.array([1., 1., 1.], dtype='>f8')

        z = convolve(x, y, boundary=boundary, normalize_kernel=False)

        if boundary is None:
            assert np.all(z == np.array([0., 4., 0.], dtype='>f8'))
        elif boundary == 'fill':
            assert np.all(z == np.array([1., 4., 3.], dtype='>f8'))
        elif boundary == 'wrap':
            assert np.all(z == np.array([4., 4., 4.], dtype='>f8'))
        else:
            assert np.all(z == np.array([2., 4., 6.], dtype='>f8'))

    @pytest.mark.parametrize(('boundary', 'nan_treatment',
                              'normalize_kernel', 'preserve_nan'),
                             itertools.product(BOUNDARY_OPTIONS,
                                               NANHANDLING_OPTIONS,
                                               NORMALIZE_OPTIONS,
                                               PRESERVE_NAN_OPTIONS))
    def test_unity_3_withnan(self, boundary, nan_treatment,
                             normalize_kernel, preserve_nan):
        '''
        Test that a unit kernel with three elements returns the same array
        (except when boundary is None). This version includes a NaN value in
        the original array.
        '''
        x = np.array([1., np.nan, 3.], dtype='>f8')
        y = np.array([0., 1., 0.], dtype='>f8')

        z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
                     normalize_kernel=normalize_kernel,
                     preserve_nan=preserve_nan)

        if preserve_nan:
            assert np.isnan(z[1])

        x = np.nan_to_num(z)
        z = np.nan_to_num(z)

        if boundary is None:
            assert np.all(z == np.array([0., 0., 0.], dtype='>f8'))
        else:
            assert np.all(z == x)

    @pytest.mark.parametrize(('boundary', 'nan_treatment',
                              'normalize_kernel', 'preserve_nan'),
                             itertools.product(BOUNDARY_OPTIONS,
                                               NANHANDLING_OPTIONS,
                                               NORMALIZE_OPTIONS,
                                               PRESERVE_NAN_OPTIONS))
    def test_uniform_3_withnan(self, boundary, nan_treatment,
                               normalize_kernel, preserve_nan):
        '''
        Test that the different modes are producing the correct results using
        a uniform kernel with three elements. This version includes a NaN
        value in the original array.
        '''
        x = np.array([1., np.nan, 3.], dtype='>f8')
        y = np.array([1., 1., 1.], dtype='>f8')

        z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
                     normalize_kernel=normalize_kernel,
                     preserve_nan=preserve_nan)

        if preserve_nan:
            assert np.isnan(z[1])

        z = np.nan_to_num(z)

        # boundary, nan_treatment, normalize_kernel
        rslt = {(None, 'interpolate', True): [0, 2, 0],
                (None, 'interpolate', False): [0, 6, 0],
                (None, 'fill', True): [0, 4/3., 0],
                (None, 'fill', False): [0, 4, 0],
                ('fill', 'interpolate', True): [1/2., 2, 3/2.],
                ('fill', 'interpolate', False): [3/2., 6, 9/2.],
                ('fill', 'fill', True): [1/3., 4/3., 3/3.],
                ('fill', 'fill', False): [1, 4, 3],
                ('wrap', 'interpolate', True): [2, 2, 2],
                ('wrap', 'interpolate', False): [6, 6, 6],
                ('wrap', 'fill', True): [4/3., 4/3., 4/3.],
                ('wrap', 'fill', False): [4, 4, 4],
                ('extend', 'interpolate', True): [1, 2, 3],
                ('extend', 'interpolate', False): [3, 6, 9],
                ('extend', 'fill', True): [2/3., 4/3., 6/3.],
                ('extend', 'fill', False): [2, 4, 6],
                }[boundary, nan_treatment, normalize_kernel]

        if preserve_nan:
            rslt[1] = 0

        assert_array_almost_equal_nulp(z, np.array(rslt, dtype='>f8'), 10)


class TestConvolve2D:

    def test_list(self):
        """
        Test that convolve works correctly when inputs are lists
        """
        x = [[1, 1, 1],
             [1, 1, 1],
             [1, 1, 1]]

        z = convolve(x, x, boundary='fill', fill_value=1,
                     normalize_kernel=True)
        assert_array_almost_equal_nulp(z, x, 10)
        z = convolve(x, x, boundary='fill', fill_value=1,
                     normalize_kernel=False)
        assert_array_almost_equal_nulp(z, np.array(x, float)*9, 10)

    @pytest.mark.parametrize(('dtype_array', 'dtype_kernel'), VALID_DTYPES)
    def test_dtype(self, dtype_array, dtype_kernel):
        '''
        Test that 32- and 64-bit floats are correctly handled
        '''
        x = np.array([[1., 2., 3.],
                      [4., 5., 6.],
                      [7., 8., 9.]], dtype=dtype_array)
        y = np.array([[0., 0., 0.],
                      [0., 1., 0.],
                      [0., 0., 0.]], dtype=dtype_kernel)
        z = convolve(x, y)
        assert x.dtype == z.dtype

    @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
    def test_unity_1x1_none(self, boundary):
        '''
        Test that a 1x1 unit kernel returns the same array
        '''
        x = np.array([[1., 2., 3.],
                      [4., 5., 6.],
                      [7., 8., 9.]], dtype='>f8')
        y = np.array([[1.]], dtype='>f8')

        z = convolve(x, y, boundary=boundary)

        assert np.all(z == x)

    @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
    def test_unity_3x3(self, boundary):
        '''
        Test that a 3x3 unit kernel returns the same array (except when
        boundary is None).
        '''
        x = np.array([[1., 2., 3.],
                      [4., 5., 6.],
                      [7., 8., 9.]], dtype='>f8')
        y = np.array([[0., 0., 0.],
                      [0., 1., 0.],
                      [0., 0., 0.]], dtype='>f8')

        z = convolve(x, y, boundary=boundary)

        if boundary is None:
            assert np.all(z == np.array([[0., 0., 0.],
                                         [0., 5., 0.],
                                         [0., 0., 0.]], dtype='>f8'))
        else:
            assert np.all(z == x)

    @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
    def test_uniform_3x3(self, boundary):
        '''
        Test that the different modes are producing the correct results using
        a 3x3 uniform kernel.
        '''
        x = np.array([[0., 0., 3.],
                      [1., 0., 0.],
                      [0., 2., 0.]], dtype='>f8')
        y = np.array([[1., 1., 1.],
                      [1., 1., 1.],
                      [1., 1., 1.]], dtype='>f8')

        z = convolve(x, y, boundary=boundary, normalize_kernel=False)

        if boundary is None:
            assert_array_almost_equal_nulp(z, np.array([[0., 0., 0.],
                                                        [0., 6., 0.],
                                                        [0., 0., 0.]], dtype='>f8'), 10)
        elif boundary == 'fill':
            assert_array_almost_equal_nulp(z, np.array([[1., 4., 3.],
                                                        [3., 6., 5.],
                                                        [3., 3., 2.]], dtype='>f8'), 10)
        elif boundary == 'wrap':
            assert_array_almost_equal_nulp(z, np.array([[6., 6., 6.],
                                                        [6., 6., 6.],
                                                        [6., 6., 6.]], dtype='>f8'), 10)
        else:
            assert_array_almost_equal_nulp(z, np.array([[2., 7., 12.],
                                                        [4., 6., 8.],
                                                        [6., 5., 4.]], dtype='>f8'), 10)

    @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
    def test_unity_3x3_withnan(self, boundary):
        '''
        Test that a 3x3 unit kernel returns the same array (except when
        boundary is None). This version includes a NaN value in the original
        array.
        '''
        x = np.array([[1., 2., 3.],
                      [4., np.nan, 6.],
                      [7., 8., 9.]], dtype='>f8')
        y = np.array([[0., 0., 0.],
                      [0., 1., 0.],
                      [0., 0., 0.]], dtype='>f8')

        z = convolve(x, y, boundary=boundary, nan_treatment='fill',
                     preserve_nan=True)

        assert np.isnan(z[1, 1])
        x = np.nan_to_num(z)
        z = np.nan_to_num(z)

        if boundary is None:
            assert np.all(z == np.array([[0., 0., 0.],
                                         [0., 0., 0.],
                                         [0., 0., 0.]], dtype='>f8'))
        else:
            assert np.all(z == x)

    @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
    def test_uniform_3x3_withnanfilled(self, boundary):
        '''
        Test that the different modes are producing the correct results using
        a 3x3 uniform kernel. This version includes a NaN value in the
        original array.
        '''
        x = np.array([[0., 0., 4.],
                      [1., np.nan, 0.],
                      [0., 3., 0.]], dtype='>f8')
        y = np.array([[1., 1., 1.],
                      [1., 1., 1.],
                      [1., 1., 1.]], dtype='>f8')

        z = convolve(x, y, boundary=boundary, nan_treatment='fill',
                     normalize_kernel=False)

        if boundary is None:
            assert_array_almost_equal_nulp(z, np.array([[0., 0., 0.],
                                                        [0., 8., 0.],
                                                        [0., 0., 0.]], dtype='>f8'), 10)
        elif boundary == 'fill':
            assert_array_almost_equal_nulp(z, np.array([[1., 5., 4.],
                                                        [4., 8., 7.],
                                                        [4., 4., 3.]], dtype='>f8'), 10)
        elif boundary == 'wrap':
            assert_array_almost_equal_nulp(z, np.array([[8., 8., 8.],
                                                        [8., 8., 8.],
                                                        [8., 8., 8.]], dtype='>f8'), 10)
        elif boundary == 'extend':
            assert_array_almost_equal_nulp(z, np.array([[2., 9., 16.],
                                                        [5., 8., 11.],
                                                        [8., 7., 6.]], dtype='>f8'), 10)
        else:
            raise ValueError("Invalid boundary specification")

    @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
    def test_uniform_3x3_withnaninterped(self, boundary):
        '''
        Test that the different modes are producing the correct results using
        a 3x3 uniform kernel. This version includes a NaN value in the
        original array.
        '''
        x = np.array([[0., 0., 4.],
                      [1., np.nan, 0.],
                      [0., 3., 0.]], dtype='>f8')
        y = np.array([[1., 1., 1.],
                      [1., 1., 1.],
                      [1., 1., 1.]], dtype='>f8')

        z = convolve(x, y, boundary=boundary, nan_treatment='interpolate',
                     normalize_kernel=True)

        if boundary is None:
            assert_array_almost_equal_nulp(z, np.array([[0., 0., 0.],
                                                        [0., 1., 0.],
                                                        [0., 0., 0.]], dtype='>f8'), 10)
        elif boundary == 'fill':
            assert_array_almost_equal_nulp(z, np.array([[1./8, 5./8, 4./8],
                                                        [4./8, 8./8, 7./8],
                                                        [4./8, 4./8, 3./8]], dtype='>f8'), 10)
        elif boundary == 'wrap':
            assert_array_almost_equal_nulp(z, np.array([[1., 1., 1.],
                                                        [1., 1., 1.],
                                                        [1., 1., 1.]], dtype='>f8'), 10)
        elif boundary == 'extend':
            assert_array_almost_equal_nulp(z, np.array([[2./8, 9./8, 16./8],
                                                        [5./8, 8./8, 11./8],
                                                        [8./8, 7./8, 6./8]], dtype='>f8'), 10)
        else:
            raise ValueError("Invalid boundary specification")

    @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
    def test_non_normalized_kernel_2D(self, boundary):
        x = np.array([[0., 0., 4.],
                      [1., 2., 0.],
                      [0., 3., 0.]], dtype='float')
        y = np.array([[1., -1., 1.],
                      [-1., 0., -1.],
                      [1., -1., 1.]], dtype='float')

        z = convolve(x, y, boundary=boundary, nan_treatment='fill',
                     normalize_kernel=False)

        if boundary is None:
            assert_array_almost_equal_nulp(z, np.array([[0., 0., 0.],
                                                        [0., 0., 0.],
                                                        [0., 0., 0.]], dtype='float'), 10)
        elif boundary == 'fill':
            assert_array_almost_equal_nulp(z, np.array([[1., -5., 2.],
                                                        [1., 0., -3.],
                                                        [-2., -1., -1.]], dtype='float'), 10)
        elif boundary == 'wrap':
            assert_array_almost_equal_nulp(z, np.array([[0., -8., 6.],
                                                        [5., 0., -4.],
                                                        [2., 3., -4.]], dtype='float'), 10)
        elif boundary == 'extend':
            assert_array_almost_equal_nulp(z, np.array([[2., -1., -2.],
                                                        [0., 0., 1.],
                                                        [2., -4., 2.]], dtype='float'), 10)
        else:
            raise ValueError("Invalid boundary specification")


class TestConvolve3D:

    def test_list(self):
        """
        Test that convolve works correctly when inputs are lists
        """
        x = [[[1, 1, 1],
              [1, 1, 1],
              [1, 1, 1]],
             [[1, 1, 1],
              [1, 1, 1],
              [1, 1, 1]],
             [[1, 1, 1],
              [1, 1, 1],
              [1, 1, 1]]]

        z = convolve(x, x, boundary='fill', fill_value=1,
                     normalize_kernel=False)
        assert_array_almost_equal_nulp(z / 27, x, 10)

    @pytest.mark.parametrize(('dtype_array', 'dtype_kernel'), VALID_DTYPES)
    def test_dtype(self, dtype_array, dtype_kernel):
        '''
        Test that 32- and 64-bit floats are correctly handled
        '''
        x = np.array([[1., 2., 3.],
                      [4., 5., 6.],
                      [7., 8., 9.]], dtype=dtype_array)
        y = np.array([[0., 0., 0.],
                      [0., 1., 0.],
                      [0., 0., 0.]], dtype=dtype_kernel)
        z = convolve(x, y)
        assert x.dtype == z.dtype

    @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
    def test_unity_1x1x1_none(self, boundary):
        '''
        Test that a 1x1x1 unit kernel returns the same array
        '''
        x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
                      [[4., 3., 1.], [5., 0., 2.], [6., 1., 1.]],
                      [[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')
        y = np.array([[[1.]]], dtype='>f8')

        z = convolve(x, y, boundary=boundary)

        assert np.all(z == x)

    @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
    def test_unity_3x3x3(self, boundary):
        '''
        Test that a 3x3x3 unit kernel returns the same array (except when
        boundary is None).
        '''
        x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
                      [[4., 3., 1.], [5., 3., 2.], [6., 1., 1.]],
                      [[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')
        y = np.zeros((3, 3, 3), dtype='>f8')
        y[1, 1, 1] = 1.

        z = convolve(x, y, boundary=boundary)

        if boundary is None:
            assert np.all(z == np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
                                         [[0., 0., 0.], [0., 3., 0.], [0., 0., 0.]],
                                         [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]], dtype='>f8'))
        else:
            assert np.all(z == x)

    @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
    def test_uniform_3x3x3(self, boundary):
        '''
        Test that the different modes are producing the correct results using
        a 3x3 uniform kernel.
        '''
        x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
                      [[4., 3., 1.], [5., 3., 2.], [6., 1., 1.]],
                      [[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')
        y = np.ones((3, 3, 3), dtype='>f8')

        z = convolve(x, y, boundary=boundary, normalize_kernel=False)

        if boundary is None:
            assert_array_almost_equal_nulp(
                z, np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
                             [[0., 0., 0.], [0., 81., 0.], [0., 0., 0.]],
                             [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]], dtype='>f8'), 10)
        elif boundary == 'fill':
            assert_array_almost_equal_nulp(
                z, np.array([[[23., 28., 16.], [35., 46., 25.], [25., 34., 18.]],
                             [[40., 50., 23.], [63., 81., 36.], [46., 60., 27.]],
                             [[32., 40., 16.], [50., 61., 22.], [36., 44., 16.]]], dtype='>f8'), 10)
        elif boundary == 'wrap':
            assert_array_almost_equal_nulp(
                z, np.array([[[81., 81., 81.], [81., 81., 81.], [81., 81., 81.]],
                             [[81., 81., 81.], [81., 81., 81.], [81., 81., 81.]],
                             [[81., 81., 81.], [81., 81., 81.], [81., 81., 81.]]], dtype='>f8'), 10)
        else:
            assert_array_almost_equal_nulp(
                z, np.array([[[65., 54., 43.], [75., 66., 57.], [85., 78., 71.]],
                             [[96., 71., 46.], [108., 81., 54.], [120., 91., 62.]],
                             [[127., 88., 49.], [141., 96., 51.], [155., 104., 53.]]], dtype='>f8'), 10)

    @pytest.mark.parametrize(('boundary', 'nan_treatment'),
                             itertools.product(BOUNDARY_OPTIONS,
                                               NANHANDLING_OPTIONS))
    def test_unity_3x3x3_withnan(self, boundary, nan_treatment):
        '''
        Test that a 3x3x3 unit kernel returns the same array (except when
        boundary is None). This version includes a NaN value in the original
        array.
        '''
        x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
                      [[4., 3., 1.], [5., np.nan, 2.], [6., 1., 1.]],
                      [[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')
        y = np.zeros((3, 3, 3), dtype='>f8')
        y[1, 1, 1] = 1.

        z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
                     preserve_nan=True)

        assert np.isnan(z[1, 1, 1])
        x = np.nan_to_num(z)
        z = np.nan_to_num(z)

        if boundary is None:
            assert np.all(z == np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
                                         [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
                                         [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]], dtype='>f8'))
        else:
            assert np.all(z == x)

    @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
    def test_uniform_3x3x3_withnan_filled(self, boundary):
        '''
        Test that the different modes are producing the correct results using
        a 3x3 uniform kernel. This version includes a NaN value in the
        original array.
        '''
        x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
                      [[4., 3., 1.], [5., np.nan, 2.], [6., 1., 1.]],
                      [[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')
        y = np.ones((3, 3, 3), dtype='>f8')

        z = convolve(x, y, boundary=boundary, nan_treatment='fill',
                     normalize_kernel=False)

        if boundary is None:
            assert_array_almost_equal_nulp(
                z, np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
                             [[0., 0., 0.], [0., 78., 0.], [0., 0., 0.]],
                             [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]], dtype='>f8'), 10)
        elif boundary == 'fill':
            assert_array_almost_equal_nulp(
                z, np.array([[[20., 25., 13.], [32., 43., 22.], [22., 31., 15.]],
                             [[37., 47., 20.], [60., 78., 33.], [43., 57., 24.]],
                             [[29., 37., 13.], [47., 58., 19.], [33., 41., 13.]]], dtype='>f8'), 10)
        elif boundary == 'wrap':
            assert_array_almost_equal_nulp(
                z, np.array([[[78., 78., 78.], [78., 78., 78.], [78., 78., 78.]],
                             [[78., 78., 78.], [78., 78., 78.], [78., 78., 78.]],
                             [[78., 78., 78.], [78., 78., 78.], [78., 78., 78.]]], dtype='>f8'), 10)
        elif boundary == 'extend':
            assert_array_almost_equal_nulp(
                z, np.array([[[62., 51., 40.], [72., 63., 54.], [82., 75., 68.]],
                             [[93., 68., 43.], [105., 78., 51.], [117., 88., 59.]],
                             [[124., 85., 46.], [138., 93., 48.], [152., 101., 50.]]], dtype='>f8'), 10)
        else:
            raise ValueError("Invalid Boundary Option")

    @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
    def test_uniform_3x3x3_withnan_interped(self, boundary):
        '''
        Test that the different modes are producing the correct results using
        a 3x3 uniform kernel. This version includes a NaN value in the
        original array.
        '''
        x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
                      [[4., 3., 1.], [5., np.nan, 2.], [6., 1., 1.]],
                      [[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')
        y = np.ones((3, 3, 3), dtype='>f8')

        z = convolve(x, y, boundary=boundary, nan_treatment='interpolate',
                     normalize_kernel=True)

        kernsum = y.sum() - 1  # one nan is missing
        mid = x[np.isfinite(x)].sum() / kernsum

        if boundary is None:
            assert_array_almost_equal_nulp(
                z, np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
                             [[0., 0., 0.], [0., 78., 0.], [0., 0., 0.]],
                             [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
                            dtype='>f8')/kernsum, 10)
        elif boundary == 'fill':
            assert_array_almost_equal_nulp(
                z, np.array([[[20., 25., 13.], [32., 43., 22.], [22., 31., 15.]],
                             [[37., 47., 20.], [60., 78., 33.], [43., 57., 24.]],
                             [[29., 37., 13.], [47., 58., 19.], [33., 41., 13.]]],
                            dtype='>f8')/kernsum, 10)
        elif boundary == 'wrap':
            assert_array_almost_equal_nulp(
                z, np.tile(mid.astype('>f8'), [3, 3, 3]), 10)
        elif boundary == 'extend':
            assert_array_almost_equal_nulp(
                z, np.array([[[62., 51., 40.], [72., 63., 54.], [82., 75., 68.]],
                             [[93., 68., 43.], [105., 78., 51.], [117., 88., 59.]],
                             [[124., 85., 46.], [138., 93., 48.], [152., 101., 50.]]],
                            dtype='>f8')/kernsum, 10)
        else:
            raise ValueError("Invalid Boundary Option")


@pytest.mark.parametrize(('convfunc', 'boundary'), BOUNDARIES_AND_CONVOLUTIONS)
def test_asymmetric_kernel(boundary, convfunc):
    '''
    Regression test for #6264: make sure that asymmetric convolution
    functions go the right direction
    '''

    x = np.array([3., 0., 1.], dtype='>f8')
    y = np.array([1, 2, 3], dtype='>f8')

    z = convfunc(x, y, boundary=boundary, normalize_kernel=False)

    if boundary == 'fill':
        assert_array_almost_equal_nulp(z, np.array([6., 10., 2.], dtype='float'), 10)
    elif boundary is None:
        assert_array_almost_equal_nulp(z, np.array([0., 10., 0.], dtype='float'), 10)
    elif boundary == 'extend':
        assert_array_almost_equal_nulp(z, np.array([15., 10., 3.], dtype='float'), 10)
    elif boundary == 'wrap':
        assert_array_almost_equal_nulp(z, np.array([9., 10., 5.], dtype='float'), 10)


@pytest.mark.parametrize('ndims', (1, 2, 3))
def test_convolution_consistency(ndims):

    np.random.seed(0)
    array = np.random.randn(*([3]*ndims))
    np.random.seed(0)
    kernel = np.random.rand(*([3]*ndims))

    conv_f = convolve_fft(array, kernel, boundary='fill')
    conv_d = convolve(array, kernel, boundary='fill')

    assert_array_almost_equal_nulp(conv_f, conv_d, 30)


def test_astropy_convolution_against_numpy():
    x = np.array([1, 2, 3])
    y = np.array([5, 4, 3, 2, 1])

    assert_array_almost_equal(np.convolve(y, x, 'same'),
                              convolve(y, x, normalize_kernel=False))
    assert_array_almost_equal(np.convolve(y, x, 'same'),
                              convolve_fft(y, x, normalize_kernel=False))


@pytest.mark.skipif('not HAS_SCIPY')
def test_astropy_convolution_against_scipy():
    from scipy.signal import fftconvolve
    x = np.array([1, 2, 3])
    y = np.array([5, 4, 3, 2, 1])

    assert_array_almost_equal(fftconvolve(y, x, 'same'),
                              convolve(y, x, normalize_kernel=False))
    assert_array_almost_equal(fftconvolve(y, x, 'same'),
                              convolve_fft(y, x, normalize_kernel=False))
6a1f796a40df07daba1a6a0d7b88e79943a8dec41df60a4a72912039b4e66151
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import math

import numpy as np
import pytest

from ..convolve import convolve, convolve_fft, convolve_models
from ...modeling import models, fitting
from ...utils.misc import NumpyRNGContext
from numpy.testing import assert_allclose, assert_almost_equal

try:
    import scipy
except ImportError:
    HAS_SCIPY = False
else:
    HAS_SCIPY = True


class TestConvolve1DModels:
    @pytest.mark.parametrize('mode', ['convolve_fft', 'convolve'])
    @pytest.mark.skipif('not HAS_SCIPY')
    def test_is_consistency_with_astropy_convolution(self, mode):
        kernel = models.Gaussian1D(1, 0, 1)
        model = models.Gaussian1D(1, 0, 1)
        model_conv = convolve_models(model, kernel, mode=mode)
        x = np.arange(-5, 6)
        ans = eval("{}(model(x), kernel(x))".format(mode))

        assert_allclose(ans, model_conv(x), atol=1e-5)

    @pytest.mark.parametrize('mode', ['convolve_fft', 'convolve'])
    @pytest.mark.skipif('not HAS_SCIPY')
    def test_against_scipy(self, mode):
        from scipy.signal import fftconvolve

        kernel = models.Gaussian1D(1, 0, 1)
        model = models.Gaussian1D(1, 0, 1)
        model_conv = convolve_models(model, kernel, mode=mode)
        x = np.arange(-5, 6)
        ans = fftconvolve(kernel(x), model(x), mode='same')

        assert_allclose(ans, model_conv(x) * kernel(x).sum(), atol=1e-5)

    @pytest.mark.parametrize('mode', ['convolve_fft', 'convolve'])
    @pytest.mark.skipif('not HAS_SCIPY')
    def test_against_scipy_with_additional_keywords(self, mode):
        from scipy.signal import fftconvolve

        kernel = models.Gaussian1D(1, 0, 1)
        model = models.Gaussian1D(1, 0, 1)
        model_conv = convolve_models(model, kernel, mode=mode,
                                     normalize_kernel=False)
        x = np.arange(-5, 6)
        ans = fftconvolve(kernel(x), model(x), mode='same')

        assert_allclose(ans, model_conv(x), atol=1e-5)

    @pytest.mark.parametrize('mode', ['convolve_fft', 'convolve'])
    def test_sum_of_gaussians(self, mode):
        """
        Test that convolving N(a, b) with N(c, d) gives N(a + c, b + d),
        where N(., .) stands for Gaussian probability density function,
        in which a and c are their means and b and d are their variances.
        """
        kernel = models.Gaussian1D(1 / math.sqrt(2 * np.pi), 1, 1)
        model = models.Gaussian1D(1 / math.sqrt(2 * np.pi), 3, 1)
        model_conv = convolve_models(model, kernel, mode=mode,
                                     normalize_kernel=False)
        ans = models.Gaussian1D(1 / (2 * math.sqrt(np.pi)), 4, np.sqrt(2))
        x = np.arange(-5, 6)

        assert_allclose(ans(x), model_conv(x), atol=1e-3)

    @pytest.mark.parametrize('mode', ['convolve_fft', 'convolve'])
    def test_convolve_box_models(self, mode):
        kernel = models.Box1D()
        model = models.Box1D()
        model_conv = convolve_models(model, kernel, mode=mode)
        x = np.linspace(-1, 1, 99)
        ans = (x + 1) * (x < 0) + (-x + 1) * (x >= 0)

        assert_allclose(ans, model_conv(x), atol=1e-3)

    @pytest.mark.parametrize('mode', ['convolve_fft', 'convolve'])
    @pytest.mark.skipif('not HAS_SCIPY')
    def test_fitting_convolve_models(self, mode):
        """
        Test that a convolved model can be fitted.
        """
        b1 = models.Box1D()
        g1 = models.Gaussian1D()

        x = np.linspace(-5, 5, 99)
        fake_model = models.Gaussian1D(amplitude=10)
        with NumpyRNGContext(123):
            fake_data = fake_model(x) + np.random.normal(size=len(x))

        init_model = convolve_models(b1, g1, mode=mode, normalize_kernel=False)
        fitter = fitting.LevMarLSQFitter()
        fitted_model = fitter(init_model, x, fake_data)

        me = np.mean(fitted_model(x) - fake_data)
        assert_almost_equal(me, 0.0, decimal=2)
862ba0a8c26a04774c87e1070a8547a65ad6034282221765ebb0e125b5796dde
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import timeit

import numpy as np  # pylint: disable=W0611

# largest exponent n (i.e. image size 2**n) to use for "linear" and fft
# convolutions
max_exponents_linear = {1: 15, 2: 7, 3: 5}
max_exponents_fft = {1: 15, 2: 10, 3: 7}

if __name__ == "__main__":
    for ndims in [1, 2, 3]:
        print("\n{}-dimensional arrays ('n' is the size of the image AND "
              "the kernel)".format(ndims))
        print(" ".join(["%17s" % n for n in ("n", "convolve", "convolve_fft")]))

        for ii in range(3, max_exponents_fft[ndims]):
            # array = np.random.random([2**ii]*ndims)
            # test ODD sizes too
            if ii < max_exponents_fft[ndims]:
                setup = ("""
import numpy as np
from astropy.convolution.convolve import convolve
from astropy.convolution.convolve import convolve_fft
array = np.random.random([%i]*%i)
kernel = np.random.random([%i]*%i)""") % (2 ** ii - 1, ndims,
                                          2 ** ii - 1, ndims)

                print("%16i:" % (int(2 ** ii - 1)), end=' ')

                if ii <= max_exponents_linear[ndims]:
                    for ffttype, extra in zip(("", "_fft"),
                                              ("", "fft_pad=False")):
                        statement = ("convolve{}(array, kernel, "
                                     "boundary='fill', {})".format(ffttype, extra))
                        besttime = min(timeit.Timer(stmt=statement,
                                                    setup=setup).repeat(3, 10))
                        print("%17f" % (besttime), end=' ')
                else:
                    print("%17s" % "skipped", end=' ')
                    statement = "convolve_fft(array, kernel, boundary='fill')"
                    besttime = min(timeit.Timer(stmt=statement,
                                                setup=setup).repeat(3, 10))
                    print("%17f" % (besttime), end=' ')

                print()

            # EVEN sizes (the printed 'n' below is 2**ii, so the arrays must
            # be built with 2**ii as well)
            setup = ("""
import numpy as np
from astropy.convolution.convolve import convolve
from astropy.convolution.convolve import convolve_fft
array = np.random.random([%i]*%i)
kernel = np.random.random([%i]*%i)""") % (2 ** ii, ndims,
                                          2 ** ii, ndims)

            print("%16i:" % (int(2 ** ii)), end=' ')

            if ii <= max_exponents_linear[ndims]:
                for ffttype in ("", "_fft"):
                    statement = ("convolve{}(array, kernel, "
                                 "boundary='fill')".format(ffttype))
                    besttime = min(timeit.Timer(stmt=statement,
                                                setup=setup).repeat(3, 10))
                    print("%17f" % (besttime), end=' ')
            else:
                print("%17s" % "skipped", end=' ')
                statement = "convolve_fft(array, kernel, boundary='fill')"
                besttime = min(timeit.Timer(stmt=statement,
                                            setup=setup).repeat(3, 10))
                print("%17f" % (besttime), end=' ')

            print()

"""
Unfortunately, these tests are pretty strongly inconclusive

RESULTS on a 2011 Mac Air:
1-dimensional arrays ('n' is the size of the image AND the kernel)
        n   convolve   convolve_fftnp   convolve_fftw   convolve_fftsp
        7:   0.000408   0.002334   0.005571   0.002677
        8:   0.000399   0.002818   0.006505   0.003094
       15:   0.000361   0.002491   0.005648   0.002678
       16:   0.000371   0.002997   0.005983   0.003036
       31:   0.000535   0.002450   0.005988   0.002880
       32:   0.000452   0.002618   0.007102   0.004366
       63:   0.000509   0.002876   0.008003   0.002981
       64:   0.000453   0.002706   0.005520   0.003049
      127:   0.000801   0.004080   0.008513   0.003932
      128:   0.000749   0.003332   0.006236   0.003159
      255:   0.002453   0.003111   0.007518   0.003564
      256:   0.002478   0.003341   0.006325   0.004290
      511:   0.008394   0.006224   0.010247   0.005991
      512:   0.007934   0.003764   0.006840   0.004106
     1023:   0.028741   0.007538   0.009591   0.007696
     1024:   0.027900   0.004871   0.009628   0.005118
     2047:   0.106323   0.021575   0.022041   0.020682
     2048:   0.108916   0.008107   0.011049   0.007596
     4095:   0.411936   0.021675   0.019761   0.020939
     4096:   0.408992   0.018870   0.016663   0.012890
     8191:   1.664517   8.278320   0.073001   7.803563
     8192:   1.657573   0.037967   0.034227   0.028390
    16383:   6.654678   0.251661   0.202271   0.222171
    16384:   6.611977   0.073630   0.067616   0.055591

2-dimensional arrays ('n' is the size of the image AND the kernel)
        n   convolve   convolve_fftnp   convolve_fftw   convolve_fftsp
        7:   0.000552   0.003524   0.006667   0.004318
        8:   0.000646   0.004443   0.007354   0.003958
       15:   0.002986   0.005093   0.012941   0.005951
       16:   0.003549   0.005688   0.008818   0.006300
       31:   0.074360   0.033973   0.031800   0.036937
       32:   0.077338   0.017708   0.025637   0.011883
       63:   0.848471   0.057407   0.052192   0.053213
       64:   0.773061   0.029657   0.033409   0.028230
      127:  14.656414   1.005329   0.402113   0.955279
      128:  15.867796   0.266233   0.268551   0.237930
      255:    skipped   1.715546   1.566876   1.745338
      256:    skipped   1.515616   1.268220   1.036881
      511:    skipped   4.066155   4.303350   3.930661
      512:    skipped   3.976139   4.337525   3.968935

3-dimensional arrays ('n' is the size of the image AND the kernel)
        n   convolve   convolve_fftnp   convolve_fftw   convolve_fftsp
        7:   0.009239   0.012957   0.011957   0.015997
        8:   0.012405   0.011328   0.011677   0.012283
       15:   0.772434   0.075621   0.056711   0.079508
       16:   0.964635   0.105846   0.072811   0.104611
       31:  62.824051   2.295193   1.189505   2.351136
       32:  79.507060   1.169182   0.821779   1.275770
       63:    skipped  11.250225  10.982726  10.585744
       64:    skipped  10.013558  11.507645  12.665557

On a 2009 Mac Pro:
1-dimensional arrays ('n' is the size of the image AND the kernel)
        n   convolve   convolve_fftnp   convolve_fftw   convolve_fftsp
        7:   0.000360   0.002269   0.004986   0.002476
        8:   0.000361   0.002468   0.005242   0.002696
       15:   0.000364   0.002255   0.005244   0.002471
       16:   0.000365   0.002506   0.005286   0.002727
       31:   0.000385   0.002380   0.005422   0.002588
       32:   0.000385   0.002531   0.005543   0.002737
       63:   0.000474   0.002407   0.005392   0.002637
       64:   0.000484   0.002602   0.005631   0.002823
      127:   0.000752   0.004122   0.007827   0.003966
      128:   0.000757   0.002763   0.005844   0.002958
      255:   0.004316   0.003258   0.006566   0.003324
      256:   0.004354   0.003180   0.006120   0.003245
      511:   0.011517   0.007158   0.009898   0.006238
      512:   0.011482   0.003873   0.006777   0.003820
     1023:   0.034105   0.009211   0.009468   0.008260
     1024:   0.034609   0.005504   0.008399   0.005080
     2047:   0.113620   0.028097   0.020662   0.021603
     2048:   0.112828   0.008403   0.010939   0.007331
     4095:   0.403373   0.023211   0.018767   0.020065
     4096:   0.403316   0.017550   0.017853   0.013651
     8191:   1.519329   8.454573   0.211436   7.212381
     8192:   1.519082   0.033148   0.030370   0.025905
    16383:   5.887481   0.317428   0.153344   0.237119
    16384:   5.888222   0.069379   0.065264   0.052847

2-dimensional arrays ('n' is the size of the image AND the kernel)
        n   convolve   convolve_fftnp   convolve_fftw   convolve_fftsp
        7:   0.000474   0.003470   0.006131   0.003503
        8:   0.000503   0.003565   0.006400   0.003586
       15:   0.002011   0.004481   0.007825   0.004496
       16:   0.002236   0.004744   0.007078   0.004680
       31:   0.027291   0.019433   0.014841   0.018034
       32:   0.029283   0.009244   0.010161   0.008964
       63:   0.445680   0.038171   0.026753   0.037404
       64:   0.460616   0.028128   0.029487   0.029149
      127:   7.003774   0.925921   0.282591   0.762671
      128:   7.063657   0.110838   0.104402   0.133523
      255:    skipped   0.804682   0.708849   0.869368
      256:    skipped   0.797800   0.721042   0.880848
      511:    skipped   3.643626   3.687562   4.584770
      512:    skipped   3.715215   4.893539   5.538462

3-dimensional arrays ('n' is the size of the image AND the kernel)
        n   convolve   convolve_fftnp   convolve_fftw   convolve_fftsp
        7:   0.004520   0.011519   0.009464   0.012335
        8:   0.006422   0.010294   0.010220   0.011711
       15:   0.329566   0.060978   0.045495   0.073692
       16:   0.405275   0.069999   0.040659   0.086114
       31:  24.935228   1.654920   0.710509   1.773879
       32:  27.524226   0.724053   0.543507   1.027568
       63:    skipped   8.982771  12.407683  16.900078
       64:    skipped   8.956070  11.934627  17.296447
"""
642359996d5757d9847a4208dd0d28b13dd267f117cc2805c1b58a69a19738b5
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import os
import sys
import subprocess

import pytest

from ...tests.helper import catch_warnings
from ...utils.data import get_pkg_data_filename
from .. import configuration
from .. import paths
from ...utils.exceptions import AstropyDeprecationWarning


def test_paths():
    assert 'astropy' in paths.get_config_dir()
    assert 'astropy' in paths.get_cache_dir()


def test_set_temp_config(tmpdir, monkeypatch):
    monkeypatch.setattr(paths.set_temp_config, '_temp_path', None)

    orig_config_dir = paths.get_config_dir()
    temp_config_dir = str(tmpdir.mkdir('config'))
    temp_astropy_config = os.path.join(temp_config_dir, 'astropy')

    # Test decorator mode
    @paths.set_temp_config(temp_config_dir)
    def test_func():
        assert paths.get_config_dir() == temp_astropy_config

        # Test temporary restoration of original default
        with paths.set_temp_config() as d:
            assert d == orig_config_dir == paths.get_config_dir()

    test_func()

    # Test context manager mode (with cleanup)
    with paths.set_temp_config(temp_config_dir, delete=True):
        assert paths.get_config_dir() == temp_astropy_config

    assert not os.path.exists(temp_config_dir)


def test_set_temp_cache(tmpdir, monkeypatch):
    monkeypatch.setattr(paths.set_temp_cache, '_temp_path', None)

    orig_cache_dir = paths.get_cache_dir()
    temp_cache_dir = str(tmpdir.mkdir('cache'))
    temp_astropy_cache = os.path.join(temp_cache_dir, 'astropy')

    # Test decorator mode
    @paths.set_temp_cache(temp_cache_dir)
    def test_func():
        assert paths.get_cache_dir() == temp_astropy_cache

        # Test temporary restoration of original default
        with paths.set_temp_cache() as d:
            assert d == orig_cache_dir == paths.get_cache_dir()

    test_func()

    # Test context manager mode (with cleanup)
    with paths.set_temp_cache(temp_cache_dir, delete=True):
        assert paths.get_cache_dir() == temp_astropy_cache

    assert not os.path.exists(temp_cache_dir)


def test_config_file():
    from ..configuration import get_config, reload_config

    apycfg = get_config('astropy')
    assert apycfg.filename.endswith('astropy.cfg')

    cfgsec = get_config('astropy.config')
    assert cfgsec.depth == 1
    assert cfgsec.name == 'config'
    assert cfgsec.parent.filename.endswith('astropy.cfg')

    reload_config('astropy')


def test_configitem():
    from ..configuration import ConfigNamespace, ConfigItem, get_config

    ci = ConfigItem(34, 'this is a Description')

    class Conf(ConfigNamespace):
        tstnm = ci

    conf = Conf()

    assert ci.module == 'astropy.config.tests.test_configs'
    assert ci() == 34
    assert ci.description == 'this is a Description'

    assert conf.tstnm == 34

    sec = get_config(ci.module)
    assert sec['tstnm'] == 34

    ci.description = 'updated Descr'
    ci.set(32)
    assert ci() == 32

    # It's useful to go back to the default to allow other test functions to
    # call this one and still be in the default configuration.
    ci.description = 'this is a Description'
    ci.set(34)
    assert ci() == 34


def test_configitem_types():
    from ..configuration import ConfigNamespace, ConfigItem

    cio = ConfigItem(['op1', 'op2', 'op3'])

    class Conf(ConfigNamespace):
        tstnm1 = ConfigItem(34)
        tstnm2 = ConfigItem(34.3)
        tstnm3 = ConfigItem(True)
        tstnm4 = ConfigItem('astring')

    conf = Conf()

    assert isinstance(conf.tstnm1, int)
    assert isinstance(conf.tstnm2, float)
    assert isinstance(conf.tstnm3, bool)
    assert isinstance(conf.tstnm4, str)

    with pytest.raises(TypeError):
        conf.tstnm1 = 34.3
    conf.tstnm2 = 12  # this should succeed as up-casting
    with pytest.raises(TypeError):
        conf.tstnm3 = 'fasd'
    with pytest.raises(TypeError):
        conf.tstnm4 = 546.245


def test_configitem_options(tmpdir):
    from ..configuration import ConfigNamespace, ConfigItem, get_config

    cio = ConfigItem(['op1', 'op2', 'op3'])

    class Conf(ConfigNamespace):
        tstnmo = cio

    conf = Conf()

    sec = get_config(cio.module)

    assert isinstance(cio(), str)
    assert cio() == 'op1'
    assert sec['tstnmo'] == 'op1'

    cio.set('op2')
    with pytest.raises(TypeError):
        cio.set('op5')
    assert sec['tstnmo'] == 'op2'

    # now try saving
    apycfg = sec
    while apycfg.parent is not apycfg:
        apycfg = apycfg.parent
    f = tmpdir.join('astropy.cfg')
    with open(f.strpath, 'wb') as fd:
        apycfg.write(fd)
    with open(f.strpath, 'r', encoding='utf-8') as fd:
        lns = [x.strip() for x in fd.readlines()]

    assert 'tstnmo = op2' in lns


def test_config_noastropy_fallback(monkeypatch):
    """
    Tests to make sure configuration items fall back to their defaults when
    there's a problem accessing the astropy directory
    """

    # make sure the config directory is not searched
    monkeypatch.setenv(str('XDG_CONFIG_HOME'), 'foo')
    monkeypatch.delenv(str('XDG_CONFIG_HOME'))
    monkeypatch.setattr(paths.set_temp_config, '_temp_path', None)

    # make sure the _find_or_create_astropy_dir function fails as though the
    # astropy dir could not be accessed
    def osraiser(dirnm, linkto):
        raise OSError
    monkeypatch.setattr(paths, '_find_or_create_astropy_dir', osraiser)

    # also have to make sure the stored configuration objects are cleared
    monkeypatch.setattr(configuration, '_cfgobjs', {})

    with pytest.raises(OSError):
        # make sure the config dir search fails
        paths.get_config_dir()

    # now run the basic tests, and make sure the warning about no astropy
    # is present
    with catch_warnings(configuration.ConfigurationMissingWarning) as w:
        test_configitem()
    assert len(w) == 1
    w = w[0]
    assert 'Configuration defaults will be used' in str(w.message)


def test_configitem_setters():
    from ..configuration import ConfigNamespace, ConfigItem

    class Conf(ConfigNamespace):
        tstnm12 = ConfigItem(42, 'this is another Description')

    conf = Conf()

    assert conf.tstnm12 == 42
    with conf.set_temp('tstnm12', 45):
        assert conf.tstnm12 == 45
    assert conf.tstnm12 == 42

    conf.tstnm12 = 43
    assert conf.tstnm12 == 43

    with conf.set_temp('tstnm12', 46):
        assert conf.tstnm12 == 46

    # Make sure it is reset even with Exception
    try:
        with conf.set_temp('tstnm12', 47):
            raise Exception
    except Exception:
        pass

    assert conf.tstnm12 == 43


def test_empty_config_file():
    from ..configuration import is_unedited_config_file

    def get_content(fn):
        with open(get_pkg_data_filename(fn), 'rt', encoding='latin-1') as fd:
            return fd.read()

    content = get_content('data/empty.cfg')
    assert is_unedited_config_file(content)

    content = get_content('data/not_empty.cfg')
    assert not is_unedited_config_file(content)

    content = get_content('data/astropy.0.3.cfg')
    assert is_unedited_config_file(content)

    content = get_content('data/astropy.0.3.windows.cfg')
    assert is_unedited_config_file(content)


class TestAliasRead:

    def setup_class(self):
        configuration._override_config_file = get_pkg_data_filename('data/alias.cfg')

    def test_alias_read(self):
        from astropy.utils.data import conf

        with catch_warnings() as w:
            conf.reload()
            assert conf.remote_timeout == 42

        assert len(w) == 1
        assert str(w[0].message).startswith(
            "Config parameter 'name_resolve_timeout' in section "
            "[coordinates.name_resolve]")

    def teardown_class(self):
        from astropy.utils.data import conf

        configuration._override_config_file = None
        conf.reload()


def test_configitem_unicode(tmpdir):
    from ..configuration import ConfigNamespace, ConfigItem, get_config

    cio = ConfigItem('ასტრონომიის')

    class Conf(ConfigNamespace):
        tstunicode = cio

    conf = Conf()

    sec = get_config(cio.module)

    assert isinstance(cio(), str)
    assert cio() == 'ასტრონომიის'
    assert sec['tstunicode'] == 'ასტრონომიის'


def test_warning_move_to_top_level():
    # Check that the warning about deprecated config items in the
    # file works.  See #2514
    from ... import conf

    configuration._override_config_file = get_pkg_data_filename('data/deprecated.cfg')

    try:
        with catch_warnings(AstropyDeprecationWarning) as w:
            conf.reload()
            conf.max_lines
        assert len(w) == 1
    finally:
        configuration._override_config_file = None
        conf.reload()


def test_no_home():
    # "import astropy" fails when neither $HOME nor $XDG_CONFIG_HOME
    # is set.  To test, we unset those environment variables for a
    # subprocess and try to import astropy.

    test_path = os.path.dirname(__file__)
    astropy_path = os.path.abspath(
        os.path.join(test_path, '..', '..', '..'))

    env = os.environ.copy()
    paths = [astropy_path]
    if env.get('PYTHONPATH'):
        paths.append(env.get('PYTHONPATH'))
    env[str('PYTHONPATH')] = str(os.pathsep.join(paths))

    for val in ['HOME', 'XDG_CONFIG_HOME']:
        if val in env:
            del env[val]

    retcode = subprocess.check_call(
        [sys.executable, '-c', 'import astropy'],
        env=env)

    assert retcode == 0


def test_unedited_template():
    # Test that the config file is written at most once
    config_dir = os.path.join(os.path.dirname(__file__), '..', '..')
    configuration.update_default_config('astropy', config_dir)
    assert configuration.update_default_config('astropy', config_dir) is False
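

# A minimal, hedged usage sketch of the ConfigNamespace/ConfigItem pattern
# the tests above exercise (assumes astropy is installed; the class and item
# names here are illustrative, not taken from the test file).
from astropy.config import ConfigNamespace, ConfigItem


class ExampleConf(ConfigNamespace):
    timeout = ConfigItem(10, 'Illustrative timeout value, in seconds')


example_conf = ExampleConf()
assert example_conf.timeout == 10
with example_conf.set_temp('timeout', 30):  # reverts on exit, even on error
    assert example_conf.timeout == 30
assert example_conf.timeout == 10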
6071c3406ac8f54469f6ee256a0ac3dd7b0ac36e8941c636e46c9d29d5219733
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test sky projections defined in WCS Paper II"""

import os

import pytest
import numpy as np
from numpy.testing import utils

from .. import projections
from ..parameters import InputParameterError
from ... import units as u
from ...io import fits
from ... import wcs
from ...utils.data import get_pkg_data_filename
from ...tests.helper import assert_quantity_allclose


def test_Projection_properties():
    projection = projections.Sky2Pix_PlateCarree()
    assert projection.n_inputs == 2
    assert projection.n_outputs == 2


PIX_COORDINATES = [-10, 30]

pars = [(x,) for x in projections.projcodes]
# There is no groundtruth file for the XPH projection available here:
#   http://www.atnf.csiro.au/people/mcalabre/WCS/example_data.html
pars.remove(('XPH',))


@pytest.mark.parametrize(('code',), pars)
def test_Sky2Pix(code):
    """Check astropy model eval against wcslib eval"""

    wcs_map = os.path.join(os.pardir, os.pardir, "wcs", "tests", "maps",
                           "1904-66_{0}.hdr".format(code))
    test_file = get_pkg_data_filename(wcs_map)
    header = fits.Header.fromfile(test_file, endcard=False, padding=False)

    params = []
    for i in range(3):
        key = 'PV2_{0}'.format(i + 1)
        if key in header:
            params.append(header[key])

    w = wcs.WCS(header)
    w.wcs.crval = [0., 0.]
    w.wcs.crpix = [0, 0]
    w.wcs.cdelt = [1, 1]
    wcslibout = w.wcs.p2s([PIX_COORDINATES], 1)
    wcs_pix = w.wcs.s2p(wcslibout['world'], 1)['pixcrd']
    model = getattr(projections, 'Sky2Pix_' + code)
    tinv = model(*params)
    x, y = tinv(wcslibout['phi'], wcslibout['theta'])
    utils.assert_almost_equal(np.asarray(x), wcs_pix[:, 0])
    utils.assert_almost_equal(np.asarray(y), wcs_pix[:, 1])


@pytest.mark.parametrize(('code',), pars)
def test_Pix2Sky(code):
    """Check astropy model eval against wcslib eval"""

    wcs_map = os.path.join(os.pardir, os.pardir, "wcs", "tests", "maps",
                           "1904-66_{0}.hdr".format(code))
    test_file = get_pkg_data_filename(wcs_map)
    header = fits.Header.fromfile(test_file, endcard=False, padding=False)

    params = []
    for i in range(3):
        key = 'PV2_{0}'.format(i + 1)
        if key in header:
            params.append(header[key])

    w = wcs.WCS(header)
    w.wcs.crval = [0., 0.]
    w.wcs.crpix = [0, 0]
    w.wcs.cdelt = [1, 1]
    wcslibout = w.wcs.p2s([PIX_COORDINATES], 1)
    wcs_phi = wcslibout['phi']
    wcs_theta = wcslibout['theta']
    model = getattr(projections, 'Pix2Sky_' + code)
    tanprj = model(*params)
    phi, theta = tanprj(*PIX_COORDINATES)
    utils.assert_almost_equal(np.asarray(phi), wcs_phi)
    utils.assert_almost_equal(np.asarray(theta), wcs_theta)


@pytest.mark.parametrize(('code',), pars)
def test_Sky2Pix_unit(code):
    """Check astropy model eval against wcslib eval"""

    wcs_map = os.path.join(os.pardir, os.pardir, "wcs", "tests", "maps",
                           "1904-66_{0}.hdr".format(code))
    test_file = get_pkg_data_filename(wcs_map)
    header = fits.Header.fromfile(test_file, endcard=False, padding=False)

    params = []
    for i in range(3):
        key = 'PV2_{0}'.format(i + 1)
        if key in header:
            params.append(header[key])

    w = wcs.WCS(header)
    w.wcs.crval = [0., 0.]
    w.wcs.crpix = [0, 0]
    w.wcs.cdelt = [1, 1]
    wcslibout = w.wcs.p2s([PIX_COORDINATES], 1)
    wcs_pix = w.wcs.s2p(wcslibout['world'], 1)['pixcrd']
    model = getattr(projections, 'Sky2Pix_' + code)
    tinv = model(*params)
    x, y = tinv(wcslibout['phi'] * u.deg, wcslibout['theta'] * u.deg)
    assert_quantity_allclose(x, wcs_pix[:, 0] * u.deg)
    assert_quantity_allclose(y, wcs_pix[:, 1] * u.deg)


@pytest.mark.parametrize(('code',), pars)
def test_Pix2Sky_unit(code):
    """Check astropy model eval against wcslib eval"""

    wcs_map = os.path.join(os.pardir, os.pardir, "wcs", "tests", "maps",
                           "1904-66_{0}.hdr".format(code))
    test_file = get_pkg_data_filename(wcs_map)
    header = fits.Header.fromfile(test_file, endcard=False, padding=False)

    params = []
    for i in range(3):
        key = 'PV2_{0}'.format(i + 1)
        if key in header:
            params.append(header[key])

    w = wcs.WCS(header)
    w.wcs.crval = [0., 0.]
    w.wcs.crpix = [0, 0]
    w.wcs.cdelt = [1, 1]
    wcslibout = w.wcs.p2s([PIX_COORDINATES], 1)
    wcs_phi = wcslibout['phi']
    wcs_theta = wcslibout['theta']
    model = getattr(projections, 'Pix2Sky_' + code)
    tanprj = model(*params)
    phi, theta = tanprj(*PIX_COORDINATES * u.deg)
    assert_quantity_allclose(phi, wcs_phi * u.deg)
    assert_quantity_allclose(theta, wcs_theta * u.deg)
    phi, theta = tanprj(*(PIX_COORDINATES * u.deg).to(u.rad))
    assert_quantity_allclose(phi, wcs_phi * u.deg)
    assert_quantity_allclose(theta, wcs_theta * u.deg)
    phi, theta = tanprj(*(PIX_COORDINATES * u.deg).to(u.arcmin))
    assert_quantity_allclose(phi, wcs_phi * u.deg)
    assert_quantity_allclose(theta, wcs_theta * u.deg)


@pytest.mark.parametrize(('code',), pars)
def test_projection_default(code):
    """Check astropy model eval with default parameters"""
    # Just makes sure that the default parameter values are reasonable
    # and accepted by wcslib.

    model = getattr(projections, 'Sky2Pix_' + code)
    tinv = model()
    x, y = tinv(45, 45)

    model = getattr(projections, 'Pix2Sky_' + code)
    tinv = model()
    x, y = tinv(0, 0)


class TestZenithalPerspective:
    """Test Zenithal Perspective projection"""

    def setup_class(self):
        ID = 'AZP'
        wcs_map = os.path.join(os.pardir, os.pardir, "wcs", "tests", "maps",
                               "1904-66_{0}.hdr".format(ID))
        test_file = get_pkg_data_filename(wcs_map)
        header = fits.Header.fromfile(test_file, endcard=False, padding=False)
        self.wazp = wcs.WCS(header)
        self.wazp.wcs.crpix = np.array([0., 0.])
        self.wazp.wcs.crval = np.array([0., 0.])
        self.wazp.wcs.cdelt = np.array([1., 1.])
        self.pv_kw = [kw[2] for kw in self.wazp.wcs.get_pv()]
        self.azp = projections.Pix2Sky_ZenithalPerspective(*self.pv_kw)

    def test_AZP_p2s(self):
        wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1)
        wcs_phi = wcslibout['phi']
        wcs_theta = wcslibout['theta']
        phi, theta = self.azp(-10, 30)
        utils.assert_almost_equal(np.asarray(phi), wcs_phi)
        utils.assert_almost_equal(np.asarray(theta), wcs_theta)

    def test_AZP_s2p(self):
        wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1)
        wcs_pix = self.wazp.wcs.s2p(wcslibout['world'], 1)['pixcrd']
        x, y = self.azp.inverse(wcslibout['phi'], wcslibout['theta'])
        utils.assert_almost_equal(np.asarray(x), wcs_pix[:, 0])
        utils.assert_almost_equal(np.asarray(y), wcs_pix[:, 1])


class TestCylindricalPerspective:
    """Test cylindrical perspective projection"""

    def setup_class(self):
        ID = "CYP"
        wcs_map = os.path.join(os.pardir, os.pardir, "wcs", "tests", "maps",
                               "1904-66_{0}.hdr".format(ID))
        test_file = get_pkg_data_filename(wcs_map)
        header = fits.Header.fromfile(test_file, endcard=False, padding=False)
        self.wazp = wcs.WCS(header)
        self.wazp.wcs.crpix = np.array([0., 0.])
        self.wazp.wcs.crval = np.array([0., 0.])
        self.wazp.wcs.cdelt = np.array([1., 1.])
        self.pv_kw = [kw[2] for kw in self.wazp.wcs.get_pv()]
        self.azp = projections.Pix2Sky_CylindricalPerspective(*self.pv_kw)

    def test_CYP_p2s(self):
        wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1)
        wcs_phi = wcslibout['phi']
        wcs_theta = wcslibout['theta']
        phi, theta = self.azp(-10, 30)
        utils.assert_almost_equal(np.asarray(phi), wcs_phi)
        utils.assert_almost_equal(np.asarray(theta), wcs_theta)

    def test_CYP_s2p(self):
        wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1)
        wcs_pix = self.wazp.wcs.s2p(wcslibout['world'], 1)['pixcrd']
        x, y = self.azp.inverse(wcslibout['phi'], wcslibout['theta'])
        utils.assert_almost_equal(np.asarray(x), wcs_pix[:, 0])
        utils.assert_almost_equal(np.asarray(y), wcs_pix[:, 1])


def test_AffineTransformation2D():
    # Simple test with a scale and translation
    model = projections.AffineTransformation2D(
        matrix=[[2, 0], [0, 2]], translation=[1, 1])

    # Coordinates for vertices of a rectangle
    rect = [[0, 0], [1, 0], [0, 3], [1, 3]]

    x, y = zip(*rect)

    new_rect = np.vstack(model(x, y)).T

    assert np.all(new_rect == [[1, 1], [3, 1], [1, 7], [3, 7]])


def test_AffineTransformation2D_inverse():
    # Test non-invertible model
    model1 = projections.AffineTransformation2D(
        matrix=[[1, 1], [1, 1]])

    with pytest.raises(InputParameterError):
        model1.inverse

    model2 = projections.AffineTransformation2D(
        matrix=[[1.2, 3.4], [5.6, 7.8]], translation=[9.1, 10.11])

    # Coordinates for vertices of a rectangle
    rect = [[0, 0], [1, 0], [0, 3], [1, 3]]

    x, y = zip(*rect)

    x_new, y_new = model2.inverse(*model2(x, y))
    utils.assert_allclose([x, y], [x_new, y_new], atol=1e-10)


def test_c_projection_striding():
    # This is just a simple test to make sure that the striding is
    # handled correctly in the projection C extension
    coords = np.arange(10).reshape((5, 2))

    model = projections.Sky2Pix_ZenithalPerspective(2, 30)

    phi, theta = model(coords[:, 0], coords[:, 1])

    utils.assert_almost_equal(
        phi,
        [0., 2.2790416, 4.4889294, 6.6250643, 8.68301])

    utils.assert_almost_equal(
        theta,
        [-76.4816918, -75.3594654, -74.1256332, -72.784558, -71.3406629])


def test_c_projections_shaped():
    nx, ny = (5, 2)
    x = np.linspace(0, 1, nx)
    y = np.linspace(0, 1, ny)
    xv, yv = np.meshgrid(x, y)

    model = projections.Pix2Sky_TAN()

    phi, theta = model(xv, yv)

    utils.assert_allclose(
        phi,
        [[0., 90., 90., 90., 90.],
         [180., 165.96375653, 153.43494882, 143.13010235, 135.]])

    utils.assert_allclose(
        theta,
        [[90., 89.75000159, 89.50001269, 89.25004283, 89.00010152],
         [89.00010152, 88.96933478, 88.88210788, 88.75019826, 88.58607353]])


def test_affine_with_quantities():
    x = 1
    y = 2
    xdeg = (x * u.pix).to(u.deg, equivalencies=u.pixel_scale(2.5 * u.deg / u.pix))
    ydeg = (y * u.pix).to(u.deg, equivalencies=u.pixel_scale(2.5 * u.deg / u.pix))
    xpix = x * u.pix
    ypix = y * u.pix

    # test affine with matrix only
    qaff = projections.AffineTransformation2D(matrix=[[1, 2], [2, 1]] * u.deg)
    with pytest.raises(ValueError):
        qx1, qy1 = qaff(xpix, ypix, equivalencies={
            'x': u.pixel_scale(2.5 * u.deg / u.pix),
            'y': u.pixel_scale(2.5 * u.deg / u.pix)})

    # test affine with matrix and translation
    qaff = projections.AffineTransformation2D(matrix=[[1, 2], [2, 1]] * u.deg,
                                              translation=[1, 2] * u.deg)
    qx1, qy1 = qaff(xpix, ypix, equivalencies={
        'x': u.pixel_scale(2.5 * u.deg / u.pix),
        'y': u.pixel_scale(2.5 * u.deg / u.pix)})
    aff = projections.AffineTransformation2D(matrix=[[1, 2], [2, 1]],
                                             translation=[1, 2])
    x1, y1 = aff(xdeg.value, ydeg.value)
    assert_quantity_allclose(qx1, x1 * u.deg)
    assert_quantity_allclose(qy1, y1 * u.deg)

    # test the case of WCS PC and CDELT transformations
    pc = np.array([[0.86585778922708, 0.50029020461607],
                   [-0.50029020461607, 0.86585778922708]])
    cdelt = np.array([[1, 3.0683055555556E-05], [3.0966944444444E-05, 1]])
    matrix = cdelt * pc
    qaff = projections.AffineTransformation2D(matrix=matrix * u.deg,
                                              translation=[0, 0] * u.deg)

    inv_matrix = np.linalg.inv(matrix)
    inv_qaff = projections.AffineTransformation2D(matrix=inv_matrix * u.pix,
                                                  translation=[0, 0] * u.pix)
    qaff.inverse = inv_qaff
    qx1, qy1 = qaff(xpix, ypix, equivalencies={
        'x': u.pixel_scale(1 * u.deg / u.pix),
        'y': u.pixel_scale(1 * u.deg / u.pix)})
    x1, y1 = qaff.inverse(qx1, qy1, equivalencies={
        'x': u.pixel_scale(1 * u.deg / u.pix),
        'y': u.pixel_scale(1 * u.deg / u.pix)})
    assert_quantity_allclose(x1, xpix)
    assert_quantity_allclose(y1, ypix)
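

# A hedged sketch of the projection pairs tested above: each Pix2Sky_* model
# maps intermediate pixel coordinates to native spherical (phi, theta), and
# its .inverse property is the matching Sky2Pix_* model (assumes astropy is
# installed; the coordinate values are arbitrary).
from astropy.modeling import projections as _proj_example

_tan = _proj_example.Pix2Sky_TAN()
_phi, _theta = _tan(0.5, 0.5)            # pixel plane -> native sphere (deg)
_x, _y = _tan.inverse(_phi, _theta)      # roundtrip back to the pixel plane
# _x, _y are approximately (0.5, 0.5), up to floating point error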
789a070244327612401aea54d3eaf45d637df6b5070d8b6f4aec2d24119030e9
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides functions to help with testing against iraf tasks
"""

from ...logger import log
import numpy as np

iraf_models_map = {1.: 'Chebyshev',
                   2.: 'Legendre',
                   3.: 'Spline3',
                   4.: 'Spline1'}


def get_records(fname):
    """
    Read the records of an IRAF database file into a python list

    Parameters
    ----------
    fname : str
        name of an IRAF database file

    Returns
    -------
    A list of records
    """
    with open(fname) as f:
        dtb = f.read()
    recs = dtb.split('begin')[1:]
    records = [Record(r) for r in recs]
    return records


def get_database_string(fname):
    """
    Read an IRAF database file

    Parameters
    ----------
    fname : str
        name of an IRAF database file

    Returns
    -------
    the database file as a string
    """
    with open(fname) as f:
        dtb = f.read()
    return dtb


class Record:
    """
    A base class for all records - represents an IRAF database record

    Attributes
    ----------
    recstr : string
        the record as a string
    fields : dict
        the fields in the record
    taskname : string
        the name of the task which created the database file
    """

    def __init__(self, recstr):
        self.recstr = recstr
        self.fields = self.get_fields()
        self.taskname = self.get_task_name()

    def aslist(self):
        reclist = self.recstr.split('\n')
        reclist = [l.strip() for l in reclist]
        # filter out empty lines (filtering, rather than remove()-ing while
        # iterating, also handles consecutive empty lines correctly)
        return [l for l in reclist if len(l) != 0]

    def get_fields(self):
        # read record fields as an array
        fields = {}
        flist = self.aslist()
        numfields = len(flist)
        for i in range(numfields):
            line = flist[i]
            if line and line[0].isalpha():
                field = line.split()
                if i + 1 < numfields:
                    if not flist[i + 1][0].isalpha():
                        fields[field[0]] = self.read_array_field(
                            flist[i:i + int(field[1]) + 1])
                    else:
                        fields[field[0]] = " ".join(s for s in field[1:])
                else:
                    fields[field[0]] = " ".join(s for s in field[1:])
            else:
                continue
        return fields

    def get_task_name(self):
        try:
            return self.fields['task']
        except KeyError:
            return None

    def read_array_field(self, fieldlist):
        # Turn an iraf record array field into a numpy array
        fieldline = [l.split() for l in fieldlist[1:]]
        # take only the first 3 columns
        # identify writes also strings at the end of some field lines
        xyz = [l[:3] for l in fieldline]
        try:
            farr = np.array(xyz)
        except Exception:
            log.debug("Could not read array field {}".format(
                fieldlist[0].split()[0]))
            # re-raise: without a value for farr, the return below would
            # only mask the real problem with a NameError
            raise
        return farr.astype(np.float64)


class IdentifyRecord(Record):
    """
    Represents a database record for the onedspec.identify task

    Attributes
    ----------
    x : array
        the X values of the identified features
        this represents values on axis1 (image rows)
    y : int
        the Y values of the identified features
        (image columns)
    z : array
        the values which X maps into
    modelname : string
        the function used to fit the data
    nterms : int
        degree of the polynomial which was fit to the data
        in IRAF this is the number of coefficients, not the order
    mrange : list
        the range of the data
    coeff : array
        function (modelname) coefficients
    """

    def __init__(self, recstr):
        super().__init__(recstr)
        self._flatcoeff = self.fields['coefficients'].flatten()
        self.x = self.fields['features'][:, 0]
        self.y = self.get_ydata()
        self.z = self.fields['features'][:, 1]
        self.modelname = self.get_model_name()
        self.nterms = self.get_nterms()
        self.mrange = self.get_range()
        self.coeff = self.get_coeff()

    def get_model_name(self):
        return iraf_models_map[self._flatcoeff[0]]

    def get_nterms(self):
        return self._flatcoeff[1]

    def get_range(self):
        low = self._flatcoeff[2]
        high = self._flatcoeff[3]
        return [low, high]

    def get_coeff(self):
        return self._flatcoeff[4:]

    def get_ydata(self):
        image = self.fields['image']
        left = image.find('[') + 1
        right = image.find(']')
        section = image[left:right]
        if ',' in section:
            yind = image.find(',') + 1
            return int(image[yind:-1])
        else:
            return int(section)


class FitcoordsRecord(Record):
    """
    Represents a database record for the longslit.fitcoords task

    Attributes
    ----------
    modelname : string
        the function used to fit the data
    xorder : int
        number of terms in x
    yorder : int
        number of terms in y
    xbounds : list
        data range in x
    ybounds : list
        data range in y
    coeff : array
        function coefficients
    """

    def __init__(self, recstr):
        super().__init__(recstr)
        self._surface = self.fields['surface'].flatten()
        self.modelname = iraf_models_map[self._surface[0]]
        self.xorder = self._surface[1]
        self.yorder = self._surface[2]
        self.xbounds = [self._surface[4], self._surface[5]]
        self.ybounds = [self._surface[6], self._surface[7]]
        self.coeff = self.get_coeff()

    def get_coeff(self):
        return self._surface[8:]


class IDB:
    """
    Base class for an IRAF identify database

    Attributes
    ----------
    records : list
        a list of all `IdentifyRecord` in the database
    numrecords : int
        number of records
    """

    def __init__(self, dtbstr):
        self.records = [IdentifyRecord(rstr) for rstr in self.aslist(dtbstr)]
        self.numrecords = len(self.records)

    def aslist(self, dtb):
        # return a list of records
        # if the first one is a comment remove it from the list
        rl = dtb.split('begin')
        try:
            rl0 = rl[0].split('\n')
        except Exception:
            return rl
        if len(rl0) == 2 and rl0[0].startswith('#') and not rl0[1].strip():
            return rl[1:]
        else:
            return rl


class ReidentifyRecord(IDB):
    """
    Represents a database record for the onedspec.reidentify task
    """

    def __init__(self, databasestr):
        super().__init__(databasestr)
        self.x = np.array([r.x for r in self.records])
        self.y = self.get_ydata()
        self.z = np.array([r.z for r in self.records])

    def get_ydata(self):
        y = np.ones(self.x.shape)
        y = y * np.array([r.y for r in self.records])[:, np.newaxis]
        return y
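

# A hedged sketch of how the Record parser above consumes an IRAF database
# entry: everything after a 'begin' keyword becomes one record whose simple
# "key value" header lines turn into the .fields dict. The record text below
# is illustrative, not a real IRAF database.
_sample_record = """
task    identify
image   test.fits[1:512,5]
units   Angstroms
"""
_rec = Record(_sample_record)
assert _rec.taskname == 'identify'
assert _rec.fields['units'] == 'Angstroms'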
8444cdb5499741c7a7e0102016e47a8c3893caf0ee8079fe8adb0495b2b1573c
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tests for blackbody model and functions."""

import pytest
import numpy as np

from ..blackbody import BlackBody1D, blackbody_nu, blackbody_lambda, FNU
from ..fitting import LevMarLSQFitter
from ...tests.helper import assert_quantity_allclose, catch_warnings
from ... import constants as const
from ... import units as u
from ...utils.exceptions import AstropyUserWarning

try:
    from scipy import optimize, integrate  # noqa
    HAS_SCIPY = True
except ImportError:
    HAS_SCIPY = False

__doctest_skip__ = ['*']


class TestBlackbody1D:

    # Make sure the temperature equivalency automatically applies by trying
    # to pass temperatures in celsius

    @pytest.mark.parametrize('temperature', (3000 * u.K, 2726.85 * u.deg_C))
    def test_evaluate(self, temperature):

        bolometric_flux = 1000 * u.L_sun / (4 * np.pi * (1.5 * u.pc) ** 2)

        b = BlackBody1D(temperature=temperature,
                        bolometric_flux=bolometric_flux)

        assert_quantity_allclose(b(1.4 * u.micron), 4734464.498937388 * u.Jy)
        assert_quantity_allclose(b(214.13747 * u.THz), 4734464.498937388 * u.Jy)

    @pytest.mark.skipif('not HAS_SCIPY')
    def test_fit(self):

        fitter = LevMarLSQFitter()

        b = BlackBody1D(3000 * u.K)

        wav = np.array([0.5, 5, 10]) * u.micron
        fnu = np.array([1, 10, 5]) * u.Jy

        b_fit = fitter(b, wav, fnu)

        assert_quantity_allclose(b_fit.temperature, 2840.744774408546 * u.K)
        assert_quantity_allclose(b_fit.bolometric_flux,
                                 6.821837296857152e-08 * u.erg / u.cm**2 / u.s)


@pytest.mark.skipif('not HAS_SCIPY')
def test_blackbody_scipy():
    """Test Planck function.

    .. note:: Needs ``scipy`` to work.

    """
    flux_unit = u.Watt / (u.m ** 2 * u.um)
    wave = np.logspace(0, 8, 100000) * u.AA
    temp = 100. * u.K
    with np.errstate(all='ignore'):
        bb_nu = blackbody_nu(wave, temp) * u.sr
    flux = bb_nu.to(flux_unit, u.spectral_density(wave)) / u.sr

    lum = wave.to(u.um)
    intflux = integrate.trapz(flux.value, x=lum.value)
    ans = const.sigma_sb * temp ** 4 / np.pi
    np.testing.assert_allclose(intflux, ans.value, rtol=0.01)  # 1% accuracy


def test_blackbody_overflow():
    """Test Planck function with overflow."""
    photlam = u.photon / (u.cm**2 * u.s * u.AA)
    wave = [0, 1000.0, 100000.0, 1e55]  # Angstrom
    temp = 10000.0  # Kelvin
    with np.errstate(all='ignore'):
        bb_lam = blackbody_lambda(wave, temp) * u.sr
    flux = bb_lam.to(photlam, u.spectral_density(wave * u.AA)) / u.sr

    # First element is NaN, last element is very small, others normal
    assert np.isnan(flux[0])
    assert np.log10(flux[-1].value) < -134
    np.testing.assert_allclose(
        flux.value[1:-1], [3.38131732e+16, 3.87451317e+15],
        rtol=1e-3)  # 0.1% accuracy in PHOTLAM/sr

    with np.errstate(all='ignore'):
        flux = blackbody_lambda(1, 1e4)
    assert flux.value == 0


def test_blackbody_synphot():
    """Test that it is consistent with IRAF SYNPHOT BBFUNC."""
    # Solid angle of solar radius at 1 kpc
    fac = np.pi * (const.R_sun / const.kpc) ** 2 * u.sr

    with np.errstate(all='ignore'):
        flux = blackbody_nu([100, 1, 1000, 1e4, 1e5] * u.AA, 5000) * fac

    assert flux.unit == FNU

    # Special check for overflow value (SYNPHOT gives 0)
    assert np.log10(flux[0].value) < -143

    np.testing.assert_allclose(
        flux.value[1:],
        [0, 2.01950807e-34, 3.78584515e-26, 1.90431881e-27],
        rtol=0.01)  # 1% accuracy


def test_blackbody_exceptions_and_warnings():
    """Test exceptions."""

    # Negative temperature
    with pytest.raises(ValueError) as exc:
        blackbody_nu(1000 * u.AA, -100)
    assert exc.value.args[0] == 'Temperature should be positive: -100.0 K'

    # Zero wavelength given for conversion to Hz
    with catch_warnings(AstropyUserWarning) as w:
        blackbody_nu(0 * u.AA, 5000)
    assert len(w) == 1
    assert 'invalid' in w[0].message.args[0]

    # Negative wavelength given for conversion to Hz
    with catch_warnings(AstropyUserWarning) as w:
        blackbody_nu(-1. * u.AA, 5000)
    assert len(w) == 1
    assert 'invalid' in w[0].message.args[0]


def test_blackbody_array_temperature():
    """Regression test to make sure that the temperature can be an array."""
    flux = blackbody_nu(1.2 * u.mm, [100, 200, 300] * u.K)
    np.testing.assert_allclose(
        flux.value, [1.804908e-12, 3.721328e-12, 5.638513e-12], rtol=1e-5)

    flux = blackbody_nu([2, 4, 6] * u.mm, [100, 200, 300] * u.K)
    np.testing.assert_allclose(
        flux.value, [6.657915e-13, 3.420677e-13, 2.291897e-13], rtol=1e-5)

    flux = blackbody_nu(np.ones((3, 4)) * u.mm, np.ones(4) * u.K)
    assert flux.shape == (3, 4)
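

# A hedged sketch of the Planck-function helper tested above: blackbody_nu
# accepts a wavelength or frequency plus a temperature and returns a surface
# brightness per unit frequency (assumes astropy and its blackbody module
# are importable; the values are arbitrary).
import astropy.units as _bb_u
from astropy.modeling.blackbody import blackbody_nu as _blackbody_nu

_bnu = _blackbody_nu(5000 * _bb_u.AA, 6000 * _bb_u.K)
print(_bnu.unit)  # FNU per steradian, i.e. erg / (cm2 Hz s sr)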
e834e5b2ba333ec89fee479e387a5f09fe226e4fd3a01ae2099c6c7e95e71710
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Here are all the test parameters and values for each
`~astropy.modeling.FittableModel` defined. There is a dictionary for 1D
and a dictionary for 2D models.

Explanation of keywords of the dictionaries:

"parameters" : list or dict
    Model parameters the model is tested with. Make sure you keep the right
    order. For polynomials you can also use a dict to specify the
    coefficients. See examples below.

"x_values" : list
    x values where the model is evaluated.

"y_values" : list
    Reference y values at the positions given in x_values.

"z_values" : list
    Reference z values at the positions given in x_values and y_values.
    (2D model option)

"x_lim" : list
    x test range for the model fitter. Depending on the model this can
    differ, e.g. the PowerLaw model should be tested over a few magnitudes.

"y_lim" : list
    y test range for the model fitter. Depending on the model this can
    differ, e.g. the PowerLaw model should be tested over a few magnitudes.
    (2D model option)

"log_fit" : bool
    PowerLaw models should be tested over a few magnitudes, so log_fit
    should be true.

"requires_scipy" : bool
    If a model requires scipy (Bessel functions etc.) set this flag.

"integral" : float
    Approximate value of the integral in the range x_lim (and y_lim).

"deriv_parameters" : list
    If given, the test of the derivative will use these parameters to create
    a model. (optional)

"deriv_initial" : list
    If given, the test of the derivative will use these parameters as
    initial values for the fit. (optional)
"""

from ..functional_models import (
    Gaussian1D, Sine1D, Box1D, Linear1D, Lorentz1D, MexicanHat1D,
    Trapezoid1D, Const1D, Moffat1D, Gaussian2D, Const2D, Box2D,
    MexicanHat2D, TrapezoidDisk2D, AiryDisk2D, Moffat2D, Disk2D,
    Ring2D, Sersic1D, Sersic2D, Voigt1D, Planar2D)
from ..polynomial import Polynomial1D, Polynomial2D
from ..powerlaws import (
    PowerLaw1D, BrokenPowerLaw1D, SmoothlyBrokenPowerLaw1D,
    ExponentialCutoffPowerLaw1D, LogParabola1D)
import numpy as np

# 1D Models
models_1D = {
    Gaussian1D: {
        'parameters': [1, 0, 1],
        'x_values': [0, np.sqrt(2), -np.sqrt(2)],
        'y_values': [1.0, 0.367879, 0.367879],
        'x_lim': [-10, 10],
        'integral': np.sqrt(2 * np.pi)
    },

    Sine1D: {
        'parameters': [1, 0.1, 0],
        'x_values': [0, 2.5],
        'y_values': [0, 1],
        'x_lim': [-10, 10],
        'integral': 0
    },

    Box1D: {
        'parameters': [1, 0, 10],
        'x_values': [-5, 5, 0, -10, 10],
        'y_values': [1, 1, 1, 0, 0],
        'x_lim': [-10, 10],
        'integral': 10
    },

    Linear1D: {
        'parameters': [1, 0],
        'x_values': [0, np.pi, 42, -1],
        'y_values': [0, np.pi, 42, -1],
        'x_lim': [-10, 10],
        'integral': 0
    },

    Lorentz1D: {
        'parameters': [1, 0, 1],
        'x_values': [0, -1, 1, 0.5, -0.5],
        'y_values': [1., 0.2, 0.2, 0.5, 0.5],
        'x_lim': [-10, 10],
        'integral': 1
    },

    MexicanHat1D: {
        'parameters': [1, 0, 1],
        'x_values': [0, 1, -1, 3, -3],
        'y_values': [1.0, 0.0, 0.0, -0.088872, -0.088872],
        'x_lim': [-20, 20],
        'integral': 0
    },

    Trapezoid1D: {
        'parameters': [1, 0, 2, 1],
        'x_values': [0, 1, -1, 1.5, -1.5, 2, 2],
        'y_values': [1, 1, 1, 0.5, 0.5, 0, 0],
        'x_lim': [-10, 10],
        'integral': 3
    },

    Const1D: {
        'parameters': [1],
        'x_values': [-1, 1, np.pi, -42., 0],
        'y_values': [1, 1, 1, 1, 1],
        'x_lim': [-10, 10],
        'integral': 20
    },

    Moffat1D: {
        'parameters': [1, 0, 1, 2],
        'x_values': [0, 1, -1, 3, -3],
        'y_values': [1.0, 0.25, 0.25, 0.01, 0.01],
        'x_lim': [-10, 10],
        'integral': 1,
        'deriv_parameters': [23.4, 1.2, 2.1, 2.3],
        'deriv_initial': [10, 1, 1, 1]
    },

    PowerLaw1D: {
        'parameters': [1, 1, 2],
        'constraints': {'fixed': {'x_0': True}},
        'x_values': [1, 10, 100],
        'y_values': [1.0, 0.01, 0.0001],
        'x_lim': [1, 10],
        'log_fit': True,
        'integral': 0.99
    },

    BrokenPowerLaw1D: {
        'parameters': [1, 1, 2, 3],
        'constraints': {'fixed': {'x_break': True}},
        'x_values': [0.1, 1, 10, 100],
        'y_values': [1e2, 1.0, 1e-3, 1e-6],
        'x_lim': [0.1, 100],
        'log_fit': True
    },

    SmoothlyBrokenPowerLaw1D: {
        'parameters': [1, 1, -2, 2, 0.5],
        'constraints': {'fixed': {'x_break': True, 'delta': True}},
        'x_values': [0.01, 1, 100],
        'y_values': [3.99920012e-04, 1.0, 3.99920012e-04],
        'x_lim': [0.01, 100],
        'log_fit': True
    },

    ExponentialCutoffPowerLaw1D: {
        'parameters': [1, 1, 2, 3],
        'constraints': {'fixed': {'x_0': True}},
        'x_values': [0.1, 1, 10, 100],
        'y_values': [9.67216100e+01, 7.16531311e-01, 3.56739933e-04,
                     3.33823780e-19],
        'x_lim': [0.01, 100],
        'log_fit': True
    },

    LogParabola1D: {
        'parameters': [1, 2, 3, 0.1],
        'constraints': {'fixed': {'x_0': True}},
        'x_values': [0.1, 1, 10, 100],
        'y_values': [3.26089063e+03, 7.62472488e+00, 6.17440488e-03,
                     1.73160572e-06],
        'x_lim': [0.1, 100],
        'log_fit': True
    },

    Polynomial1D: {
        'parameters': {'degree': 2, 'c0': 1., 'c1': 1., 'c2': 1.},
        'x_values': [1, 10, 100],
        'y_values': [3, 111, 10101],
        'x_lim': [-3, 3]
    },

    Sersic1D: {
        'parameters': [1, 20, 4],
        'x_values': [0.1, 1, 10, 100],
        'y_values': [2.78629391e+02, 5.69791430e+01, 3.38788244e+00,
                     2.23941982e-02],
        'requires_scipy': True,
        'x_lim': [0, 10],
        'log_fit': True
    },

    Voigt1D: {
        'parameters': [0, 1, 0.5, 0.9],
        'x_values': [0, 2, 4, 8, 10],
        'y_values': [0.520935, 0.017205, 0.003998, 0.000983, 0.000628],
        'x_lim': [-3, 3]
    }
}

# 2D Models
models_2D = {
    Gaussian2D: {
        'parameters': [1, 0, 0, 1, 1],
        'constraints': {'fixed': {'theta': True}},
        'x_values': [0, np.sqrt(2), -np.sqrt(2)],
        'y_values': [0, np.sqrt(2), -np.sqrt(2)],
        'z_values': [1, 1. / np.exp(1) ** 2, 1. / np.exp(1) ** 2],
        'x_lim': [-10, 10],
        'y_lim': [-10, 10],
        'integral': 2 * np.pi,
        'deriv_parameters': [137., 5.1, 5.4, 1.5, 2., np.pi/4],
        'deriv_initial': [10, 5, 5, 4, 4, .5]
    },

    Const2D: {
        'parameters': [1],
        'x_values': [-1, 1, np.pi, -42., 0],
        'y_values': [0, 1, 42, np.pi, -1],
        'z_values': [1, 1, 1, 1, 1],
        'x_lim': [-10, 10],
        'y_lim': [-10, 10],
        'integral': 400
    },

    Box2D: {
        'parameters': [1, 0, 0, 10, 10],
        'x_values': [-5, 5, -5, 5, 0, -10, 10],
        'y_values': [-5, 5, 0, 0, 0, -10, 10],
        'z_values': [1, 1, 1, 1, 1, 0, 0],
        'x_lim': [-10, 10],
        'y_lim': [-10, 10],
        'integral': 100
    },

    MexicanHat2D: {
        'parameters': [1, 0, 0, 1],
        'x_values': [0, 0, 0, 0, 0, 1, -1, 3, -3],
        'y_values': [0, 1, -1, 3, -3, 0, 0, 0, 0],
        'z_values': [1.0, 0.303265, 0.303265, -0.038881, -0.038881,
                     0.303265, 0.303265, -0.038881, -0.038881],
        'x_lim': [-10, 11],
        'y_lim': [-10, 11],
        'integral': 0
    },

    TrapezoidDisk2D: {
        'parameters': [1, 0, 0, 1, 1],
        'x_values': [0, 0.5, 0, 1.5],
        'y_values': [0, 0.5, 1.5, 0],
        'z_values': [1, 1, 0.5, 0.5],
        'x_lim': [-3, 3],
        'y_lim': [-3, 3]
    },

    AiryDisk2D: {
        'parameters': [7, 0, 0, 10],
        'x_values': [0, 1, -1, -0.5, -0.5],
        'y_values': [0, -1, 0.5, 0.5, -0.5],
        'z_values': [7., 6.50158267, 6.68490643, 6.87251093, 6.87251093],
        'x_lim': [-10, 10],
        'y_lim': [-10, 10],
        'requires_scipy': True
    },

    Moffat2D: {
        'parameters': [1, 0, 0, 1, 2],
        'x_values': [0, 1, -1, 3, -3],
        'y_values': [0, -1, 3, 1, -3],
        'z_values': [1.0, 0.111111, 0.008264, 0.008264, 0.00277],
        'x_lim': [-3, 3],
        'y_lim': [-3, 3]
    },

    Polynomial2D: {
        'parameters': {'degree': 1, 'c0_0': 1., 'c1_0': 1., 'c0_1': 1.},
        'x_values': [1, 2, 3],
        'y_values': [1, 3, 2],
        'z_values': [3, 6, 6],
        'x_lim': [1, 100],
        'y_lim': [1, 100]
    },

    Disk2D: {
        'parameters': [1, 0, 0, 5],
        'x_values': [-5, 5, -5, 5, 0, -10, 10],
        'y_values': [-5, 5, 0, 0, 0, -10, 10],
        'z_values': [0, 0, 1, 1, 1, 0, 0],
        'x_lim': [-10, 10],
        'y_lim': [-10, 10],
        'integral': np.pi * 5 ** 2
    },

    Ring2D: {
        'parameters': [1, 0, 0, 5, 5],
        'x_values': [-5, 5, -5, 5, 0, -10, 10],
        'y_values': [-5, 5, 0, 0, 0, -10, 10],
        'z_values': [1, 1, 1, 1, 0, 0, 0],
        'x_lim': [-10, 10],
        'y_lim': [-10, 10],
        'integral': np.pi * (10 ** 2 - 5 ** 2)
    },

    Sersic2D: {
        'parameters': [1, 25, 4, 50, 50, 0.5, -1],
        'x_values': [0.0, 1, 10, 100],
        'y_values': [1, 100, 0.0, 10],
        'z_values': [1.686398e-02, 9.095221e-02, 2.341879e-02,
                     9.419231e-02],
        'requires_scipy': True,
        'x_lim': [1, 1e10],
        'y_lim': [1, 1e10]
    },

    Planar2D: {
        'parameters': [1, 1, 0],
        'x_values': [0, np.pi, 42, -1],
        'y_values': [np.pi, 0, -1, 42],
        'z_values': [np.pi, np.pi, 41, 41],
        'x_lim': [-10, 10],
        'y_lim': [-10, 10],
        'integral': 0
    }
}
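

# A hedged sketch of how a test harness might consume one entry of the
# models_1D table above: instantiate the model with 'parameters' and compare
# its output at 'x_values' against 'y_values' (uses names defined in this
# module; the tolerance is illustrative).
_entry = models_1D[Gaussian1D]
_model = Gaussian1D(*_entry['parameters'])
np.testing.assert_allclose(_model(_entry['x_values']),
                           _entry['y_values'], atol=1e-5)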
6e62862e8cbb29b81fc8b1813331a7e4600bd55c33b97c2d943cfdf40201a879
# Licensed under a 3-clause BSD style license - see LICENSE.rst

"""
Module to test fitting routines
"""

import os.path

import pytest
import numpy as np
from numpy import linalg
from numpy.testing.utils import assert_allclose, assert_almost_equal
from unittest import mock

from . import irafutil
from .. import models
from ..core import Fittable2DModel, Parameter
from ..fitting import *
from ...utils import NumpyRNGContext
from ...utils.data import get_pkg_data_filename
from .utils import ignore_non_integer_warning
from ...stats import sigma_clip
from ...utils.exceptions import AstropyUserWarning
from ..fitting import populate_entry_points
import warnings

try:
    from scipy import optimize
    HAS_SCIPY = True
except ImportError:
    HAS_SCIPY = False

try:
    from pkg_resources import EntryPoint
    HAS_PKG = True
except ImportError:
    HAS_PKG = False

fitters = [SimplexLSQFitter, SLSQPLSQFitter]

_RANDOM_SEED = 0x1337


class TestPolynomial2D:
    """Tests for 2D polynomial fitting."""

    def setup_class(self):
        self.model = models.Polynomial2D(2)
        self.y, self.x = np.mgrid[:5, :5]

        def poly2(x, y):
            return 1 + 2 * x + 3 * x ** 2 + 4 * y + 5 * y ** 2 + 6 * x * y
        self.z = poly2(self.x, self.y)
        self.fitter = LinearLSQFitter()

    def test_poly2D_fitting(self):
        v = self.model.fit_deriv(x=self.x, y=self.y)
        p = linalg.lstsq(v, self.z.flatten())[0]
        new_model = self.fitter(self.model, self.x, self.y, self.z)
        assert_allclose(new_model.parameters, p)

    def test_eval(self):
        new_model = self.fitter(self.model, self.x, self.y, self.z)
        assert_allclose(new_model(self.x, self.y), self.z)

    @pytest.mark.skipif('not HAS_SCIPY')
    def test_polynomial2D_nonlinear_fitting(self):
        self.model.parameters = [.6, 1.8, 2.9, 3.7, 4.9, 6.7]
        nlfitter = LevMarLSQFitter()
        new_model = nlfitter(self.model, self.x, self.y, self.z)
        assert_allclose(new_model.parameters, [1, 2, 3, 4, 5, 6])


class TestICheb2D:
    """
    Tests 2D Chebyshev polynomial fitting

    Create a 2D polynomial (z) using Polynomial2DModel and default coefficients
    Fit z using a ICheb2D model
    Evaluate the ICheb2D polynomial and compare with the initial z
    """

    def setup_class(self):
        self.pmodel = models.Polynomial2D(2)
        self.y, self.x = np.mgrid[:5, :5]
        self.z = self.pmodel(self.x, self.y)
        self.cheb2 = models.Chebyshev2D(2, 2)
        self.fitter = LinearLSQFitter()

    def test_default_params(self):
        self.cheb2.parameters = np.arange(9)
        p = np.array([1344., 1772., 400., 1860., 2448., 552., 432., 568.,
                      128.])
        z = self.cheb2(self.x, self.y)
        model = self.fitter(self.cheb2, self.x, self.y, z)
        assert_almost_equal(model.parameters, p)

    def test_poly2D_cheb2D(self):
        model = self.fitter(self.cheb2, self.x, self.y, self.z)
        z1 = model(self.x, self.y)
        assert_almost_equal(self.z, z1)

    @pytest.mark.skipif('not HAS_SCIPY')
    def test_chebyshev2D_nonlinear_fitting(self):
        cheb2d = models.Chebyshev2D(2, 2)
        cheb2d.parameters = np.arange(9)
        z = cheb2d(self.x, self.y)
        cheb2d.parameters = [0.1, .6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9]
        nlfitter = LevMarLSQFitter()
        model = nlfitter(cheb2d, self.x, self.y, z)
        assert_allclose(model.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8],
                        atol=10**-9)

    @pytest.mark.skipif('not HAS_SCIPY')
    def test_chebyshev2D_nonlinear_fitting_with_weights(self):
        cheb2d = models.Chebyshev2D(2, 2)
        cheb2d.parameters = np.arange(9)
        z = cheb2d(self.x, self.y)
        cheb2d.parameters = [0.1, .6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9]
        nlfitter = LevMarLSQFitter()
        weights = np.ones_like(self.y)
        model = nlfitter(cheb2d, self.x, self.y, z, weights=weights)
        assert_allclose(model.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8],
                        atol=10**-9)


@pytest.mark.skipif('not HAS_SCIPY')
class TestJointFitter:
    """
    Tests the joint fitting routine using 2 gaussian models
    """

    def setup_class(self):
        """
        Create 2 gaussian models and some data with noise.
        Create a fitter for the two models keeping the amplitude parameter
        common for the two models.
        """
        self.g1 = models.Gaussian1D(10, mean=14.9, stddev=.3)
        self.g2 = models.Gaussian1D(10, mean=13, stddev=.4)
        self.jf = JointFitter([self.g1, self.g2],
                              {self.g1: ['amplitude'],
                               self.g2: ['amplitude']}, [9.8])
        self.x = np.arange(10, 20, .1)
        y1 = self.g1(self.x)
        y2 = self.g2(self.x)

        with NumpyRNGContext(_RANDOM_SEED):
            n = np.random.randn(100)

        self.ny1 = y1 + 2 * n
        self.ny2 = y2 + 2 * n
        self.jf(self.x, self.ny1, self.x, self.ny2)

    def test_joint_parameter(self):
        """
        Tests that the amplitude of the two models is the same
        """
        assert_allclose(self.jf.fitparams[0], self.g1.parameters[0])
        assert_allclose(self.jf.fitparams[0], self.g2.parameters[0])

    def test_joint_fitter(self):
        """
        Tests the fitting routine with a similar procedure.
        Compares the fitted parameters.
        """
        p1 = [14.9, .3]
        p2 = [13, .4]
        A = 9.8
        p = np.r_[A, p1, p2]

        def model(A, p, x):
            return A * np.exp(-0.5 / p[1] ** 2 * (x - p[0]) ** 2)

        def errfunc(p, x1, y1, x2, y2):
            return np.ravel(np.r_[model(p[0], p[1:3], x1) - y1,
                                  model(p[0], p[3:], x2) - y2])

        coeff, _ = optimize.leastsq(errfunc, p,
                                    args=(self.x, self.ny1, self.x, self.ny2))
        assert_allclose(coeff, self.jf.fitparams, rtol=10 ** (-2))


class TestLinearLSQFitter:

    def test_chebyshev1D(self):
        """Tests fitting a 1D Chebyshev polynomial to some real world data."""

        test_file = get_pkg_data_filename(os.path.join('data',
                                                       'idcompspec.fits'))
        with open(test_file) as f:
            lines = f.read()
            reclist = lines.split('begin')

        record = irafutil.IdentifyRecord(reclist[1])
        coeffs = record.coeff
        order = int(record.fields['order'])

        initial_model = models.Chebyshev1D(order - 1,
                                           domain=record.get_range())
        fitter = LinearLSQFitter()

        fitted_model = fitter(initial_model, record.x, record.z)
        assert_allclose(fitted_model.parameters, np.array(coeffs),
                        rtol=10e-2)

    def test_linear_fit_model_set(self):
        """Tests fitting multiple models simultaneously."""

        init_model = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
        x = np.arange(10)
        y_expected = init_model(x, model_set_axis=False)
        assert y_expected.shape == (2, 10)

        # Add a bit of random noise
        with NumpyRNGContext(_RANDOM_SEED):
            y = y_expected + np.random.normal(0, 0.01, size=y_expected.shape)

        fitter = LinearLSQFitter()
        fitted_model = fitter(init_model, x, y)
        assert_allclose(fitted_model(x, model_set_axis=False), y_expected,
                        rtol=1e-1)

    def test_linear_fit_2d_model_set(self):
        """Tests fitted multiple 2-D models simultaneously."""

        init_model = models.Polynomial2D(degree=2, c0_0=[1, 1], n_models=2)
        x = np.arange(10)
        y = np.arange(10)
        z_expected = init_model(x, y, model_set_axis=False)
        assert z_expected.shape == (2, 10)

        # Add a bit of random noise
        with NumpyRNGContext(_RANDOM_SEED):
            z = z_expected + np.random.normal(0, 0.01, size=z_expected.shape)

        fitter = LinearLSQFitter()
        fitted_model = fitter(init_model, x, y, z)
        assert_allclose(fitted_model(x, y, model_set_axis=False), z_expected,
                        rtol=1e-1)

    def test_linear_fit_fixed_parameter(self):
        """
        Tests fitting a polynomial model with a fixed parameter (issue #6135).
        """
        init_model = models.Polynomial1D(degree=2, c1=1)
        init_model.c1.fixed = True

        x = np.arange(10)
        y = 2 + x + 0.5*x*x

        fitter = LinearLSQFitter()
        fitted_model = fitter(init_model, x, y)
        assert_allclose(fitted_model.parameters, [2., 1., 0.5], atol=1e-14)

    def test_linear_fit_model_set_fixed_parameter(self):
        """
        Tests fitting a polynomial model set with a fixed parameter (#6135).
        """
        init_model = models.Polynomial1D(degree=2, c1=[1, -2], n_models=2)
        init_model.c1.fixed = True

        x = np.arange(10)
        yy = np.array([2 + x + 0.5*x*x, -2*x])

        fitter = LinearLSQFitter()
        fitted_model = fitter(init_model, x, yy)

        assert_allclose(fitted_model.c0, [2., 0.], atol=1e-14)
        assert_allclose(fitted_model.c1, [1., -2.], atol=1e-14)
        assert_allclose(fitted_model.c2, [0.5, 0.], atol=1e-14)

    def test_linear_fit_2d_model_set_fixed_parameters(self):
        """
        Tests fitting a 2d polynomial model set with fixed parameters (#6135).
        """
        init_model = models.Polynomial2D(degree=2, c1_0=[1, 2], c0_1=[-0.5, 1],
                                         n_models=2,
                                         fixed={'c1_0': True, 'c0_1': True})

        x, y = np.mgrid[0:5, 0:5]
        zz = np.array([1+x-0.5*y+0.1*x*x, 2*x+y-0.2*y*y])

        fitter = LinearLSQFitter()
        fitted_model = fitter(init_model, x, y, zz)

        assert_allclose(fitted_model(x, y, model_set_axis=False), zz,
                        atol=1e-14)

    def test_linear_fit_model_set_masked_values(self):
        """
        Tests model set fitting with masked value(s) (#4824, #6819).
        """
        # NB. For single models, there is an equivalent doctest.

        init_model = models.Polynomial1D(degree=1, n_models=2)
        x = np.arange(10)
        y = np.ma.masked_array([2*x+1, x-2], mask=np.zeros_like([x, x]))

        y[0, 7] = 100.    # throw off fit coefficients if unmasked
        y.mask[0, 7] = True
        y[1, 1:3] = -100.
        y.mask[1, 1:3] = True

        fitter = LinearLSQFitter()
        fitted_model = fitter(init_model, x, y)

        assert_allclose(fitted_model.c0, [1., -2.], atol=1e-14)
        assert_allclose(fitted_model.c1, [2., 1.], atol=1e-14)

    def test_linear_fit_2d_model_set_masked_values(self):
        """
        Tests 2D model set fitting with masked value(s) (#4824, #6819).
        """
        init_model = models.Polynomial2D(1, n_models=2)
        x, y = np.mgrid[0:5, 0:5]
        z = np.ma.masked_array([2*x+3*y+1, x-0.5*y-2],
                               mask=np.zeros_like([x, x]))

        z[0, 3, 1] = -1000.  # throw off fit coefficients if unmasked
        z.mask[0, 3, 1] = True

        fitter = LinearLSQFitter()
        fitted_model = fitter(init_model, x, y, z)

        assert_allclose(fitted_model.c0_0, [1., -2.], atol=1e-14)
        assert_allclose(fitted_model.c1_0, [2., 1.], atol=1e-14)
        assert_allclose(fitted_model.c0_1, [3., -0.5], atol=1e-14)


@pytest.mark.skipif('not HAS_SCIPY')
class TestNonLinearFitters:
    """Tests non-linear least squares fitting and the SLSQP algorithm."""

    def setup_class(self):
        self.initial_values = [100, 5, 1]

        self.xdata = np.arange(0, 10, 0.1)
        sigma = 4. * np.ones_like(self.xdata)

        with NumpyRNGContext(_RANDOM_SEED):
            yerror = np.random.normal(0, sigma)

        def func(p, x):
            return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)

        self.ydata = func(self.initial_values, self.xdata) + yerror
        self.gauss = models.Gaussian1D(100, 5, stddev=1)

    def test_estimated_vs_analytic_deriv(self):
        """
        Runs `LevMarLSQFitter` with estimated and analytic derivatives of a
        `Gaussian1D`.
        """
        fitter = LevMarLSQFitter()
        model = fitter(self.gauss, self.xdata, self.ydata)
        g1e = models.Gaussian1D(100, 5.0, stddev=1)
        efitter = LevMarLSQFitter()
        emodel = efitter(g1e, self.xdata, self.ydata, estimate_jacobian=True)
        assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))

    def test_estimated_vs_analytic_deriv_with_weights(self):
        """
        Runs `LevMarLSQFitter` with estimated and analytic derivatives of a
        `Gaussian1D`.
        """
        weights = 1.0 / (self.ydata / 10.)

        fitter = LevMarLSQFitter()
        model = fitter(self.gauss, self.xdata, self.ydata, weights=weights)
        g1e = models.Gaussian1D(100, 5.0, stddev=1)
        efitter = LevMarLSQFitter()
        emodel = efitter(g1e, self.xdata, self.ydata, weights=weights,
                         estimate_jacobian=True)
        assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))

    def test_with_optimize(self):
        """
        Tests results from `LevMarLSQFitter` against `scipy.optimize.leastsq`.
        """
        fitter = LevMarLSQFitter()
        model = fitter(self.gauss, self.xdata, self.ydata,
                       estimate_jacobian=True)

        def func(p, x):
            return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)

        def errfunc(p, x, y):
            return func(p, x) - y

        result = optimize.leastsq(errfunc, self.initial_values,
                                  args=(self.xdata, self.ydata))
        assert_allclose(model.parameters, result[0], rtol=10 ** (-3))

    def test_with_weights(self):
        """
        Tests results from `LevMarLSQFitter` with weights.
        """
        # part 1: weights are equal to 1
        fitter = LevMarLSQFitter()
        model = fitter(self.gauss, self.xdata, self.ydata,
                       estimate_jacobian=True)
        withw = fitter(self.gauss, self.xdata, self.ydata,
                       estimate_jacobian=True,
                       weights=np.ones_like(self.xdata))

        assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4))

        # part 2: weights are 0 or 1 (effectively, they are a mask)
        weights = np.zeros_like(self.xdata)
        weights[::2] = 1.
        mask = weights >= 1.

        model = fitter(self.gauss, self.xdata[mask], self.ydata[mask],
                       estimate_jacobian=True)
        withw = fitter(self.gauss, self.xdata, self.ydata,
                       estimate_jacobian=True, weights=weights)

        assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4))

    @pytest.mark.parametrize('fitter_class', fitters)
    def test_fitter_against_LevMar(self, fitter_class):
        """Tests results from non-linear fitters against `LevMarLSQFitter`."""

        levmar = LevMarLSQFitter()
        fitter = fitter_class()
        with ignore_non_integer_warning():
            new_model = fitter(self.gauss, self.xdata, self.ydata)
        model = levmar(self.gauss, self.xdata, self.ydata)
        assert_allclose(model.parameters, new_model.parameters,
                        rtol=10 ** (-4))

    def test_LSQ_SLSQP_with_constraints(self):
        """
        Runs `LevMarLSQFitter` and `SLSQPLSQFitter` on a model with
        constraints.
        """
        g1 = models.Gaussian1D(100, 5, stddev=1)
        g1.mean.fixed = True
        fitter = LevMarLSQFitter()
        fslsqp = SLSQPLSQFitter()
        with ignore_non_integer_warning():
            slsqp_model = fslsqp(g1, self.xdata, self.ydata)
        model = fitter(g1, self.xdata, self.ydata)
        assert_allclose(model.parameters, slsqp_model.parameters,
                        rtol=10 ** (-4))

    def test_simplex_lsq_fitter(self):
        """A basic test for the `SimplexLSQ` fitter."""

        class Rosenbrock(Fittable2DModel):
            a = Parameter()
            b = Parameter()

            @staticmethod
            def evaluate(x, y, a, b):
                return (a - x) ** 2 + b * (y - x ** 2) ** 2

        x = y = np.linspace(-3.0, 3.0, 100)
        with NumpyRNGContext(_RANDOM_SEED):
            z = Rosenbrock.evaluate(x, y, 1.0, 100.0)
            z += np.random.normal(0., 0.1, size=z.shape)

        fitter = SimplexLSQFitter()
        r_i = Rosenbrock(1, 100)
        r_f = fitter(r_i, x, y, z)

        assert_allclose(r_f.parameters, [1.0, 100.0], rtol=1e-2)

    def test_param_cov(self):
        """
        Tests that the 'param_cov' fit_info entry gets the right answer for
        *linear* least squares, where the answer is exact
        """
        a = 2
        b = 100

        with NumpyRNGContext(_RANDOM_SEED):
            x = np.linspace(0, 1, 100)
            # y scatter is amplitude ~1 to make sure covariance is
            # non-negligible
            y = x*a + b + np.random.randn(len(x))

        # first compute the ordinary least squares covariance matrix
        X = np.matrix(np.vstack([x, np.ones(len(x))]).T)
        beta = np.linalg.inv(X.T * X) * X.T * np.matrix(y).T
        s2 = np.sum((y - (X * beta).A.ravel())**2) / (len(y) - len(beta))
        olscov = np.linalg.inv(X.T * X) * s2

        # now do the non-linear least squares fit
        mod = models.Linear1D(a, b)
        fitter = LevMarLSQFitter()

        fmod = fitter(mod, x, y)

        assert_allclose(fmod.parameters, beta.A.ravel())
        assert_allclose(olscov, fitter.fit_info['param_cov'])


@pytest.mark.skipif('not HAS_PKG')
class TestEntryPoint:
    """Tests population of fitting with entry point fitters"""

    def setup_class(self):
        self.exception_not_thrown = Exception(
            "The test should not have gotten here. "
            "There was no exception thrown")

    def successfulimport(self):
        # This should work
        class goodclass(Fitter):
            __name__ = "GoodClass"
        return goodclass

    def raiseimporterror(self):
        # This should fail as it raises an Import Error
        raise ImportError

    def returnbadfunc(self):
        def badfunc():
            # This should import but it should fail the type check
            pass
        return badfunc

    def returnbadclass(self):
        # This should import but it should fail the subclass type check
        class badclass:
            pass
        return badclass

    def test_working(self):
        """This should work fine"""
        mock_entry_working = mock.create_autospec(EntryPoint)
        mock_entry_working.name = "Working"
        mock_entry_working.load = self.successfulimport
        populate_entry_points([mock_entry_working])

    def test_import_error(self):
        """This raises an import error on load to test that it is handled
        correctly"""
        with warnings.catch_warnings():
            warnings.filterwarnings('error')
            try:
                mock_entry_importerror = mock.create_autospec(EntryPoint)
                mock_entry_importerror.name = "IErr"
                mock_entry_importerror.load = self.raiseimporterror
                populate_entry_points([mock_entry_importerror])
            except AstropyUserWarning as w:
                if "ImportError" in w.args[0]:
                    # any error for this case should have this in it.
                    pass
                else:
                    raise w
            else:
                raise self.exception_not_thrown

    def test_bad_func(self):
        """This returns a function which fails the type check"""
        with warnings.catch_warnings():
            warnings.filterwarnings('error')
            try:
                mock_entry_badfunc = mock.create_autospec(EntryPoint)
                mock_entry_badfunc.name = "BadFunc"
                mock_entry_badfunc.load = self.returnbadfunc
                populate_entry_points([mock_entry_badfunc])
            except AstropyUserWarning as w:
                if "Class" in w.args[0]:
                    # any error for this case should have this in it.
                    pass
                else:
                    raise w
            else:
                raise self.exception_not_thrown

    def test_bad_class(self):
        """This returns a class which doesn't inherit from Fitter"""
        with warnings.catch_warnings():
            warnings.filterwarnings('error')
            try:
                mock_entry_badclass = mock.create_autospec(EntryPoint)
                mock_entry_badclass.name = "BadClass"
                mock_entry_badclass.load = self.returnbadclass
                populate_entry_points([mock_entry_badclass])
            except AstropyUserWarning as w:
                if 'modeling.Fitter' in w.args[0]:
                    # any error for this case should have this in it.
                    pass
                else:
                    raise w
            else:
                raise self.exception_not_thrown


@pytest.mark.skipif('not HAS_SCIPY')
class Test1DFittingWithOutlierRemoval:

    def setup_class(self):
        self.x = np.linspace(-5., 5., 200)
        self.model_params = (3.0, 1.3, 0.8)

        def func(p, x):
            return p[0]*np.exp(-0.5*(x - p[1])**2/p[2]**2)

        self.y = func(self.model_params, self.x)

    def test_with_fitters_and_sigma_clip(self):
        import scipy.stats as stats

        np.random.seed(0)
        c = stats.bernoulli.rvs(0.25, size=self.x.shape)
        self.y += (np.random.normal(0., 0.2, self.x.shape) +
                   c*np.random.normal(3.0, 5.0, self.x.shape))

        g_init = models.Gaussian1D(amplitude=1., mean=0, stddev=1.)
        # test with Levenberg-Marquardt Least Squares fitter
        fit = FittingWithOutlierRemoval(LevMarLSQFitter(), sigma_clip,
                                        niter=3, sigma=3.0)
        _, fitted_model = fit(g_init, self.x, self.y)
        assert_allclose(fitted_model.parameters, self.model_params, rtol=1e-1)
        # test with Sequential Least Squares Programming fitter
        fit = FittingWithOutlierRemoval(SLSQPLSQFitter(), sigma_clip,
                                        niter=3, sigma=3.0)
        _, fitted_model = fit(g_init, self.x, self.y)
        assert_allclose(fitted_model.parameters, self.model_params, rtol=1e-1)
        # test with Simplex LSQ fitter
        fit = FittingWithOutlierRemoval(SimplexLSQFitter(), sigma_clip,
                                        niter=3, sigma=3.0)
        _, fitted_model = fit(g_init, self.x, self.y)
        assert_allclose(fitted_model.parameters, self.model_params, atol=1e-1)


@pytest.mark.skipif('not HAS_SCIPY')
class Test2DFittingWithOutlierRemoval:

    def setup_class(self):
        self.y, self.x = np.mgrid[-3:3:128j, -3:3:128j]
        self.model_params = (3.0, 1.0, 0.0, 0.8, 0.8)

        def Gaussian_2D(p, pos):
            return p[0]*np.exp(-0.5*(pos[0] - p[2])**2 / p[4]**2 -
                               0.5*(pos[1] - p[1])**2 / p[3]**2)

        self.z = Gaussian_2D(self.model_params, np.array([self.y, self.x]))

    def initial_guess(self, data, pos):
        """computes the centroid of the data as the initial guess for the
        center position"""
        y = pos[0]
        x = pos[1]

        wx = x * data
        wy = y * data
        total_intensity = np.sum(data)
        x_mean = np.sum(wx) / total_intensity
        y_mean = np.sum(wy) / total_intensity

        x_to_pixel = x[0].size / (x[x[0].size - 1][x[0].size - 1] - x[0][0])
        y_to_pixel = y[0].size / (y[y[0].size - 1][y[0].size - 1] - y[0][0])
        x_pos = np.around(x_mean * x_to_pixel + x[0].size / 2.).astype(int)
        y_pos = np.around(y_mean * y_to_pixel + y[0].size / 2.).astype(int)

        amplitude = data[y_pos][x_pos]

        return amplitude, x_mean, y_mean

    def test_with_fitters_and_sigma_clip(self):
        import scipy.stats as stats

        np.random.seed(0)
        c = stats.bernoulli.rvs(0.25, size=self.z.shape)
        self.z += (np.random.normal(0., 0.2, self.z.shape) +
                   c*np.random.normal(self.z, 2.0, self.z.shape))

        guess = self.initial_guess(self.z, np.array([self.y, self.x]))
        g2_init = models.Gaussian2D(amplitude=guess[0], x_mean=guess[1],
                                    y_mean=guess[2], x_stddev=0.75,
                                    y_stddev=1.25)

        # test with Levenberg-Marquardt Least Squares fitter
        fit = FittingWithOutlierRemoval(LevMarLSQFitter(), sigma_clip,
                                        niter=3, sigma=3.)
        _, fitted_model = fit(g2_init, self.x, self.y, self.z)
        assert_allclose(fitted_model.parameters[0:5], self.model_params,
                        atol=1e-1)
        # test with Sequential Least Squares Programming fitter
        fit = FittingWithOutlierRemoval(SLSQPLSQFitter(), sigma_clip, niter=3,
                                        sigma=3.)
        _, fitted_model = fit(g2_init, self.x, self.y, self.z)
        assert_allclose(fitted_model.parameters[0:5], self.model_params,
                        atol=1e-1)
        # test with Simplex LSQ fitter
        fit = FittingWithOutlierRemoval(SimplexLSQFitter(), sigma_clip,
                                        niter=3, sigma=3.)
        _, fitted_model = fit(g2_init, self.x, self.y, self.z)
        assert_allclose(fitted_model.parameters[0:5], self.model_params,
                        atol=1e-1)


@pytest.mark.skipif('not HAS_SCIPY')
def test_fitters_with_weights():
    """Issue #5737"""
    Xin, Yin = np.mgrid[0:21, 0:21]
    fitter = LevMarLSQFitter()

    with NumpyRNGContext(_RANDOM_SEED):
        zsig = np.random.normal(0, 0.01, size=Xin.shape)

    # Non-linear model
    g2 = models.Gaussian2D(10, 10, 9, 2, 3)
    z = g2(Xin, Yin)
    gmod = fitter(models.Gaussian2D(15, 7, 8, 1.3, 1.2), Xin, Yin, z + zsig)
    assert_allclose(gmod.parameters, g2.parameters, atol=10 ** (-2))

    # Linear model
    p2 = models.Polynomial2D(3)
    p2.parameters = np.arange(10)/1.2
    z = p2(Xin, Yin)
    pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig)
    assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))


@pytest.mark.skipif('not HAS_SCIPY')
def test_fitters_interface():
    """
    Test that **kwargs work with all optimizers.
    This is a basic smoke test.
    """
    levmar = LevMarLSQFitter()
    slsqp = SLSQPLSQFitter()
    simplex = SimplexLSQFitter()

    kwargs = {'maxiter': 77, 'verblevel': 1, 'epsilon': 1e-2, 'acc': 1e-6}
    simplex_kwargs = {'maxiter': 77, 'verblevel': 1, 'acc': 1e-6}
    model = models.Gaussian1D(10, 4, .3)
    x = np.arange(21)
    y = model(x)

    slsqp_model = slsqp(model, x, y, **kwargs)
    simplex_model = simplex(model, x, y, **simplex_kwargs)
    kwargs.pop('verblevel')
    lm_model = levmar(model, x, y, **kwargs)
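

# A hedged sketch of the basic fitting workflow the tests above revolve
# around: build a model with rough initial parameters, then let a fitter
# return a new, fitted model instance (assumes astropy and scipy are
# installed; the synthetic data here are noiseless for simplicity).
import numpy as _fit_np
from astropy.modeling import models as _fit_models, fitting as _fit_fitting

_x = _fit_np.linspace(-5, 5, 200)
_y = _fit_models.Gaussian1D(3.0, 1.3, 0.8)(_x)
_fitter = _fit_fitting.LevMarLSQFitter()
_fitted = _fitter(_fit_models.Gaussian1D(amplitude=1., mean=0, stddev=1.),
                  _x, _y)
# _fitted.parameters is approximately [3.0, 1.3, 0.8]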
22d4199ea6f967467003eda1d207ac59cf2670b142d1ef7514ab9a391eee981b
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest import numpy as np from numpy.testing import assert_allclose, assert_array_equal from .. import models, InputParameterError from ...coordinates import Angle from .. import fitting from ...tests.helper import catch_warnings from ...utils.exceptions import AstropyDeprecationWarning try: from scipy import optimize # pylint: disable=W0611 HAS_SCIPY = True except ImportError: HAS_SCIPY = False def test_Trapezoid1D(): """Regression test for https://github.com/astropy/astropy/issues/1721""" model = models.Trapezoid1D(amplitude=4.2, x_0=2.0, width=1.0, slope=3) xx = np.linspace(0, 4, 8) yy = model(xx) yy_ref = [0., 1.41428571, 3.12857143, 4.2, 4.2, 3.12857143, 1.41428571, 0.] assert_allclose(yy, yy_ref, rtol=0, atol=1e-6) def test_Gaussian2D(): """ Test rotated elliptical Gaussian2D model. https://github.com/astropy/astropy/pull/2038 """ model = models.Gaussian2D(4.2, 1.7, 3.1, x_stddev=5.1, y_stddev=3.3, theta=np.pi/6.) y, x = np.mgrid[0:5, 0:5] g = model(x, y) g_ref = [[3.01907812, 2.99051889, 2.81271552, 2.5119566, 2.13012709], [3.55982239, 3.6086023, 3.4734158, 3.17454575, 2.75494838], [3.88059142, 4.0257528, 3.96554926, 3.70908389, 3.29410187], [3.91095768, 4.15212857, 4.18567526, 4.00652015, 3.64146544], [3.6440466, 3.95922417, 4.08454159, 4.00113878, 3.72161094]] assert_allclose(g, g_ref, rtol=0, atol=1e-6) assert_allclose([model.x_fwhm, model.y_fwhm], [12.009582229657841, 7.7709061486021325]) def test_Gaussian2DCovariance(): """ Test rotated elliptical Gaussian2D model when cov_matrix is input. https://github.com/astropy/astropy/pull/2199 """ cov_matrix = [[49., -16.], [-16., 9.]] model = models.Gaussian2D(17., 2.0, 2.5, cov_matrix=cov_matrix) y, x = np.mgrid[0:5, 0:5] g = model(x, y) g_ref = [[4.3744505, 5.8413977, 7.42988694, 9.00160175, 10.38794269], [8.83290201, 10.81772851, 12.61946384, 14.02225593, 14.84113227], [13.68528889, 15.37184621, 16.44637743, 16.76048705, 16.26953638], [16.26953638, 16.76048705, 16.44637743, 15.37184621, 13.68528889], [14.84113227, 14.02225593, 12.61946384, 10.81772851, 8.83290201]] assert_allclose(g, g_ref, rtol=0, atol=1e-6) def test_Gaussian2DRotation(): amplitude = 42 x_mean, y_mean = 0, 0 x_stddev, y_stddev = 2, 3 theta = Angle(10, 'deg') pars = dict(amplitude=amplitude, x_mean=x_mean, y_mean=y_mean, x_stddev=x_stddev, y_stddev=y_stddev) rotation = models.Rotation2D(angle=theta.degree) point1 = (x_mean + 2 * x_stddev, y_mean + 2 * y_stddev) point2 = rotation(*point1) g1 = models.Gaussian2D(theta=0, **pars) g2 = models.Gaussian2D(theta=theta.radian, **pars) value1 = g1(*point1) value2 = g2(*point2) assert_allclose(value1, value2) def test_Gaussian2D_invalid_inputs(): x_stddev = 5.1 y_stddev = 3.3 theta = 10 cov_matrix = [[49., -16.], [-16., 9.]] # first make sure the valid ones are OK models.Gaussian2D() models.Gaussian2D(x_stddev=x_stddev, y_stddev=y_stddev, theta=theta) models.Gaussian2D(x_stddev=None, y_stddev=y_stddev, theta=theta) models.Gaussian2D(x_stddev=x_stddev, y_stddev=None, theta=theta) models.Gaussian2D(x_stddev=x_stddev, y_stddev=y_stddev, theta=None) models.Gaussian2D(cov_matrix=cov_matrix) with pytest.raises(InputParameterError): models.Gaussian2D(x_stddev=0, cov_matrix=cov_matrix) with pytest.raises(InputParameterError): models.Gaussian2D(y_stddev=0, cov_matrix=cov_matrix) with pytest.raises(InputParameterError): models.Gaussian2D(theta=0, cov_matrix=cov_matrix) def test_moffat_fwhm(): ans = 34.641016151377542 kwargs = {'gamma': 10, 'alpha': 0.5} m1 = 
models.Moffat1D(**kwargs) m2 = models.Moffat2D(**kwargs) assert_allclose([m1.fwhm, m2.fwhm], ans) def test_RedshiftScaleFactor(): """Like ``test_ScaleModel()``.""" # Scale by a scalar m = models.RedshiftScaleFactor(0.4) assert m(0) == 0 assert_array_equal(m([1, 2]), [1.4, 2.8]) assert_allclose(m.inverse(m([1, 2])), [1, 2]) # Scale by a list m = models.RedshiftScaleFactor([-0.5, 0, 0.5], n_models=3) assert_array_equal(m(0), 0) assert_array_equal(m([1, 2], model_set_axis=False), [[0.5, 1], [1, 2], [1.5, 3]]) assert_allclose(m.inverse(m([1, 2], model_set_axis=False)), [[1, 2], [1, 2], [1, 2]]) def test_Ellipse2D(): """Test Ellipse2D model.""" amplitude = 7.5 x0, y0 = 15, 15 theta = Angle(45, 'deg') em = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta.radian) y, x = np.mgrid[0:30, 0:30] e = em(x, y) assert np.all(e[e > 0] == amplitude) assert e[y0, x0] == amplitude rotation = models.Rotation2D(angle=theta.degree) point1 = [2, 0] # Rotation2D center is (0, 0) point2 = rotation(*point1) point1 = np.array(point1) + [x0, y0] point2 = np.array(point2) + [x0, y0] e1 = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta=0.) e2 = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta=theta.radian) assert e1(*point1) == e2(*point2) def test_Ellipse2D_circular(): """Test that circular Ellipse2D agrees with Disk2D [3736].""" amplitude = 7.5 radius = 10 size = (radius * 2) + 1 y, x = np.mgrid[0:size, 0:size] ellipse = models.Ellipse2D(amplitude, radius, radius, radius, radius, theta=0)(x, y) disk = models.Disk2D(amplitude, radius, radius, radius)(x, y) assert np.all(ellipse == disk) def test_Scale_inverse(): m = models.Scale(1.2345) assert_allclose(m.inverse(m(6.789)), 6.789) def test_Shift_inverse(): m = models.Shift(1.2345) assert_allclose(m.inverse(m(6.789)), 6.789) @pytest.mark.skipif('not HAS_SCIPY') def test_Shift_model_levmar_fit(): """Test fitting Shift model with LevMarLSQFitter (issue #6103).""" init_model = models.Shift() x = np.arange(10) y = x+0.1 fitter = fitting.LevMarLSQFitter() fitted_model = fitter(init_model, x, y) assert_allclose(fitted_model.parameters, [0.1], atol=1e-15) def test_Shift_model_set_linear_fit(): """Test linear fitting of Shift model (issue #6103).""" init_model = models.Shift(offset=[0, 0], n_models=2) x = np.arange(10) yy = np.array([x+0.1, x-0.2]) fitter = fitting.LinearLSQFitter() fitted_model = fitter(init_model, x, yy) assert_allclose(fitted_model.parameters, [0.1, -0.2], atol=1e-15) def test_Scale_model_set_linear_fit(): """Test linear fitting of Scale model (#6103).""" init_model = models.Scale(factor=[0, 0], n_models=2) x = np.arange(-3, 7) yy = np.array([1.15*x, 0.96*x]) fitter = fitting.LinearLSQFitter() fitted_model = fitter(init_model, x, yy) assert_allclose(fitted_model.parameters, [1.15, 0.96], atol=1e-15) # https://github.com/astropy/astropy/issues/6178 def test_Ring2D_rout(): m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_in=2, r_out=5) assert m.width.value == 3 @pytest.mark.skipif("not HAS_SCIPY") def test_Voigt1D(): voi = models.Voigt1D(amplitude_L=-0.5, x_0=1.0, fwhm_L=5.0, fwhm_G=5.0) xarr = np.linspace(-5.0, 5.0, num=40) yarr = voi(xarr) voi_init = models.Voigt1D(amplitude_L=-1.0, x_0=1.0, fwhm_L=5.0, fwhm_G=5.0) fitter = fitting.LevMarLSQFitter() voi_fit = fitter(voi_init, xarr, yarr) assert_allclose(voi_fit.param_sets, voi.param_sets) @pytest.mark.skipif("not HAS_SCIPY") def test_compound_models_with_class_variables(): models_2d = [models.AiryDisk2D, models.Sersic2D] models_1d = [models.Sersic1D] for model_2d in models_2d: class 
CompoundModel2D(models.Const2D + model_2d): pass x, y = np.mgrid[:10, :10] f = CompoundModel2D()(x, y) assert f.shape == (10, 10) for model_1d in models_1d: class CompoundModel1D(models.Const1D + model_1d): pass x = np.arange(10) f = CompoundModel1D()(x) assert f.shape == (10,)
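

# --- Illustrative sketch, not part of the original test module ---
# test_Gaussian2DCovariance above drives Gaussian2D with a covariance
# matrix instead of (x_stddev, y_stddev, theta). The two parametrizations
# are related through a plain eigendecomposition; this hedged, numpy-only
# helper (a hypothetical name, not astropy API) shows the idea.
def _cov_to_axes(cov_matrix):
    import numpy as np
    cov = np.asarray(cov_matrix, dtype=float)
    # Eigenvalues of the covariance are the variances along the principal
    # axes; eigh returns them in ascending order.
    evals, evecs = np.linalg.eigh(cov)
    minor, major = np.sqrt(evals)
    # Position angle of the major axis relative to the x axis.
    theta = np.arctan2(evecs[1, -1], evecs[0, -1])
    return major, minor, theta


# For the matrix used in test_Gaussian2DCovariance this gives standard
# deviations of roughly 7.39 and 1.84 along the principal axes:
# _cov_to_axes([[49., -16.], [-16., 9.]])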
7eedb0bfd98c0467c72a205cf354de8c9f6a00ad8b2ab66a11b275fb7afdb692
# Licensed under a 3-clause BSD style license - see LICENSE.rst import operator import numpy as np from ..utils import ExpressionTree as ET, ellipse_extent from ..models import Ellipse2D def test_traverse_postorder_duplicate_subtrees(): """ Regression test for a bug in `ExpressionTree.traverse_postorder` where given an expression like ``(1 + 2) + (1 + 2)`` where the two proper subtrees are actually the same object. """ subtree = ET('+', ET(1), ET(2)) tree = ET('+', subtree, subtree) traversal = [n.value for n in tree.traverse_postorder()] assert traversal == [1, 2, '+', 1, 2, '+', '+'] # TODO: It might prove useful to implement a simple expression parser to build # trees; this would be easy and might find use elsewhere def test_tree_evaluate_subexpression(): """Test evaluating a subexpression from an expression tree.""" operators = {'+': operator.add, '-': operator.sub, '*': operator.mul, '/': operator.truediv, '**': operator.pow} # The full expression represented by this tree is: # 1.0 + 2 - 3 * 4 / 5 ** 6 (= 2.999232 if you must know) tree = ET('+', ET(1.0), ET('-', ET(2.0), ET('*', ET(3.0), ET('/', ET(4.0), ET('**', ET(5.0), ET(6.0)))))) def test_slice(start, stop, expected): assert np.allclose(tree.evaluate(operators, start=start, stop=stop), expected) assert tree.evaluate(operators) == (1.0 + 2.0 - 3.0 * 4.0 / 5.0 ** 6.0) test_slice(0, 5, (1.0 + 2.0 - 3.0 * 4.0 / 5.0)) test_slice(0, 4, (1.0 + 2.0 - 3.0 * 4.0)) test_slice(0, 3, (1.0 + 2.0 - 3.0)) test_slice(0, 2, (1.0 + 2.0)) test_slice(0, 1, 1.0) test_slice(1, 6, (2.0 - 3.0 * 4.0 / 5.0 ** 6.0)) test_slice(1, 5, (2.0 - 3.0 * 4.0 / 5.0)) test_slice(1, 4, (2.0 - 3.0 * 4.0)) test_slice(1, 3, (2.0 - 3.0)) test_slice(1, 2, 2.0) test_slice(2, 6, (3.0 * 4.0 / 5.0 ** 6.0)) test_slice(2, 5, (3.0 * 4.0 / 5.0)) test_slice(2, 4, (3.0 * 4.0)) test_slice(2, 3, 3.0) test_slice(3, 6, (4.0 / 5.0 ** 6.0)) test_slice(3, 5, (4.0 / 5.0)) test_slice(3, 4, 4.0) test_slice(4, 6, (5.0 ** 6.0)) test_slice(4, 5, 5.0) test_slice(5, 6, 6.0) def test_ellipse_extent(): # Test this properly bounds the ellipse imshape = (100, 100) coords = y, x = np.indices(imshape) amplitude = 1 x0 = 50 y0 = 50 a = 30 b = 10 theta = np.pi / 4 model = Ellipse2D(amplitude, x0, y0, a, b, theta) dx, dy = ellipse_extent(a, b, theta) limits = ((y0 - dy, y0 + dy), (x0 - dx, x0 + dx)) model.bounding_box = limits actual = model.render(coords=coords) expected = model(x, y) # Check that the full ellipse is captured np.testing.assert_allclose(expected, actual, atol=0, rtol=1) # Check the bounding_box isn't too large limits = np.array(limits).flatten() for i in [0, 1]: s = actual.sum(axis=i) diff = np.abs(limits[2 * i] - np.where(s > 0)[0][0]) assert diff < 1
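

# --- Illustrative sketch, not part of the original test module ---
# test_traverse_postorder_duplicate_subtrees above pins down that a shared
# subtree must be visited once per occurrence. This minimal stand-in (NOT
# astropy's ExpressionTree) shows why a recursive postorder traversal
# naturally has that property.
class _Node:
    def __init__(self, value, left=None, right=None):
        self.value, self.left, self.right = value, left, right

    def postorder(self):
        # Recursion descends into ``left``/``right`` regardless of object
        # identity, so a subtree reused twice is yielded twice.
        if self.left is not None:
            yield from self.left.postorder()
        if self.right is not None:
            yield from self.right.postorder()
        yield self.value


_subtree = _Node('+', _Node(1), _Node(2))
_tree = _Node('+', _subtree, _subtree)
assert [n.value for n in [_tree]][0] == '+'
assert list(_tree.postorder()) == [1, 2, '+', 1, 2, '+', '+']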
ea90c80944c6526e7566dde6a3d9b83e60922c658e0791157cef76e9dea7fe7c
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Tests for model evaluation. Compare the results of some models with other programs. """ try: import cPickle as pickle except ImportError: import pickle import pytest import numpy as np from numpy.testing import utils from .example_models import models_1D, models_2D from .. import fitting, models from ..core import FittableModel from ..polynomial import PolynomialBase from ... import units as u from ...utils import minversion from ...tests.helper import assert_quantity_allclose from ...utils import NumpyRNGContext try: import scipy from scipy import optimize # pylint: disable=W0611 HAS_SCIPY = True except ImportError: HAS_SCIPY = False HAS_SCIPY_14 = HAS_SCIPY and minversion(scipy, "0.14") @pytest.mark.skipif('not HAS_SCIPY') def test_custom_model(amplitude=4, frequency=1): def sine_model(x, amplitude=4, frequency=1): """ Model function """ return amplitude * np.sin(2 * np.pi * frequency * x) def sine_deriv(x, amplitude=4, frequency=1): """ Jacobian of model function, e.g. derivative of the function with respect to the *parameters* """ da = np.sin(2 * np.pi * frequency * x) df = 2 * np.pi * x * amplitude * np.cos(2 * np.pi * frequency * x) return np.vstack((da, df)) SineModel = models.custom_model(sine_model, fit_deriv=sine_deriv) x = np.linspace(0, 4, 50) sin_model = SineModel() y = sin_model.evaluate(x, 5., 2.) y_prime = sin_model.fit_deriv(x, 5., 2.) np.random.seed(0) data = sin_model(x) + np.random.rand(len(x)) - 0.5 fitter = fitting.LevMarLSQFitter() model = fitter(sin_model, x, data) assert np.all((np.array([model.amplitude.value, model.frequency.value]) - np.array([amplitude, frequency])) < 0.001) def test_custom_model_init(): @models.custom_model def SineModel(x, amplitude=4, frequency=1): """Model function""" return amplitude * np.sin(2 * np.pi * frequency * x) sin_model = SineModel(amplitude=2., frequency=0.5) assert sin_model.amplitude == 2. assert sin_model.frequency == 0.5 def test_custom_model_defaults(): @models.custom_model def SineModel(x, amplitude=4, frequency=1): """Model function""" return amplitude * np.sin(2 * np.pi * frequency * x) sin_model = SineModel() assert SineModel.amplitude.default == 4 assert SineModel.frequency.default == 1 assert sin_model.amplitude == 4 assert sin_model.frequency == 1 def test_custom_model_bounding_box(): """Test bounding box evaluation for a 3D model""" def ellipsoid(x, y, z, x0=13, y0=10, z0=8, a=4, b=3, c=2, amp=1): rsq = ((x - x0) / a) ** 2 + ((y - y0) / b) ** 2 + ((z - z0) / c) ** 2 val = (rsq < 1) * amp return val class Ellipsoid3D(models.custom_model(ellipsoid)): @property def bounding_box(self): return ((self.z0 - self.c, self.z0 + self.c), (self.y0 - self.b, self.y0 + self.b), (self.x0 - self.a, self.x0 + self.a)) model = Ellipsoid3D() bbox = model.bounding_box zlim, ylim, xlim = bbox dz, dy, dx = np.diff(bbox) / 2 z1, y1, x1 = np.mgrid[slice(zlim[0], zlim[1] + 1), slice(ylim[0], ylim[1] + 1), slice(xlim[0], xlim[1] + 1)] z2, y2, x2 = np.mgrid[slice(zlim[0] - dz, zlim[1] + dz + 1), slice(ylim[0] - dy, ylim[1] + dy + 1), slice(xlim[0] - dx, xlim[1] + dx + 1)] arr = model(x2, y2, z2) sub_arr = model(x1, y1, z1) # check for flux agreement assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * 1e-7 class Fittable2DModelTester: """ Test class for all two dimensional parametric models. Test values have to be defined in example_models.py. 
    It currently tests the model with different input types, evaluates the
    model at different positions, and checks that it returns the correct
    values. It also tests whether the model works with non-linear fitters.

    This can be used as a base class for user defined model testing.
    """

    def setup_class(self):
        self.N = 100
        self.M = 100
        self.eval_error = 0.0001
        self.fit_error = 0.1
        self.x = 5.3
        self.y = 6.7
        self.x1 = np.arange(1, 10, .1)
        self.y1 = np.arange(1, 10, .1)
        self.y2, self.x2 = np.mgrid[:10, :8]

    def test_input2D(self, model_class, test_parameters):
        """Test model with different input types."""

        model = create_model(model_class, test_parameters)
        model(self.x, self.y)
        model(self.x1, self.y1)
        model(self.x2, self.y2)

    def test_eval2D(self, model_class, test_parameters):
        """Test model values at certain given points"""

        model = create_model(model_class, test_parameters)
        x = test_parameters['x_values']
        y = test_parameters['y_values']
        z = test_parameters['z_values']
        assert np.all((np.abs(model(x, y) - z) < self.eval_error))

    def test_bounding_box2D(self, model_class, test_parameters):
        """Test bounding box evaluation"""

        model = create_model(model_class, test_parameters)

        # testing setter
        model.bounding_box = ((-5, 5), (-5, 5))
        assert model.bounding_box == ((-5, 5), (-5, 5))

        model.bounding_box = None
        with pytest.raises(NotImplementedError):
            model.bounding_box

        # test that an exception is raised if the dimensions don't match
        with pytest.raises(ValueError):
            model.bounding_box = (-5, 5)

        del model.bounding_box

        try:
            bbox = model.bounding_box
        except NotImplementedError:
            pytest.skip("Bounding_box is not defined for model.")

        ylim, xlim = bbox
        dy, dx = np.diff(bbox)/2
        y1, x1 = np.mgrid[slice(ylim[0], ylim[1] + 1),
                          slice(xlim[0], xlim[1] + 1)]
        y2, x2 = np.mgrid[slice(ylim[0] - dy, ylim[1] + dy + 1),
                          slice(xlim[0] - dx, xlim[1] + dx + 1)]

        arr = model(x2, y2)
        sub_arr = model(x1, y1)

        # check for flux agreement
        assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * 1e-7

    @pytest.mark.skipif('not HAS_SCIPY')
    def test_fitter2D(self, model_class, test_parameters):
        """Test if the parametric model works with the fitter."""

        x_lim = test_parameters['x_lim']
        y_lim = test_parameters['y_lim']

        parameters = test_parameters['parameters']
        model = create_model(model_class, test_parameters)

        if isinstance(parameters, dict):
            parameters = [parameters[name] for name in model.param_names]

        if "log_fit" in test_parameters:
            if test_parameters['log_fit']:
                x = np.logspace(x_lim[0], x_lim[1], self.N)
                y = np.logspace(y_lim[0], y_lim[1], self.N)
        else:
            x = np.linspace(x_lim[0], x_lim[1], self.N)
            y = np.linspace(y_lim[0], y_lim[1], self.N)
        xv, yv = np.meshgrid(x, y)

        np.random.seed(0)
        # add 10% noise to the amplitude
        noise = np.random.rand(self.N, self.N) - 0.5
        data = model(xv, yv) + 0.1 * parameters[0] * noise
        fitter = fitting.LevMarLSQFitter()
        new_model = fitter(model, xv, yv, data)

        params = [getattr(new_model, name) for name in new_model.param_names]
        fixed = [param.fixed for param in params]
        expected = np.array([val for val, fixed in zip(parameters, fixed)
                             if not fixed])
        fitted = np.array([param.value for param in params
                           if not param.fixed])
        utils.assert_allclose(fitted, expected, atol=self.fit_error)

    @pytest.mark.skipif('not HAS_SCIPY')
    def test_deriv_2D(self, model_class, test_parameters):
        """
        Test the derivative of a model by fitting with an estimated and
        analytical derivative.
""" x_lim = test_parameters['x_lim'] y_lim = test_parameters['y_lim'] if model_class.fit_deriv is None: pytest.skip("Derivative function is not defined for model.") if issubclass(model_class, PolynomialBase): pytest.skip("Skip testing derivative of polynomials.") if "log_fit" in test_parameters: if test_parameters['log_fit']: x = np.logspace(x_lim[0], x_lim[1], self.N) y = np.logspace(y_lim[0], y_lim[1], self.M) else: x = np.linspace(x_lim[0], x_lim[1], self.N) y = np.linspace(y_lim[0], y_lim[1], self.M) xv, yv = np.meshgrid(x, y) try: model_with_deriv = create_model(model_class, test_parameters, use_constraints=False, parameter_key='deriv_initial') model_no_deriv = create_model(model_class, test_parameters, use_constraints=False, parameter_key='deriv_initial') model = create_model(model_class, test_parameters, use_constraints=False, parameter_key='deriv_initial') except KeyError: model_with_deriv = create_model(model_class, test_parameters, use_constraints=False) model_no_deriv = create_model(model_class, test_parameters, use_constraints=False) model = create_model(model_class, test_parameters, use_constraints=False) # add 10% noise to the amplitude rsn = np.random.RandomState(1234567890) amplitude = test_parameters['parameters'][0] n = 0.1 * amplitude * (rsn.rand(self.M, self.N) - 0.5) data = model(xv, yv) + n fitter_with_deriv = fitting.LevMarLSQFitter() new_model_with_deriv = fitter_with_deriv(model_with_deriv, xv, yv, data) fitter_no_deriv = fitting.LevMarLSQFitter() new_model_no_deriv = fitter_no_deriv(model_no_deriv, xv, yv, data, estimate_jacobian=True) utils.assert_allclose(new_model_with_deriv.parameters, new_model_no_deriv.parameters, rtol=0.1) class Fittable1DModelTester: """ Test class for all one dimensional parametric models. Test values have to be defined in example_models.py. It currently test the model with different input types, evaluates the model at different positions and assures that it gives the correct values. And tests if the model works with non-linear fitters. This can be used as a base class for user defined model testing. 
""" def setup_class(self): self.N = 100 self.M = 100 self.eval_error = 0.0001 self.fit_error = 0.1 self.x = 5.3 self.y = 6.7 self.x1 = np.arange(1, 10, .1) self.y1 = np.arange(1, 10, .1) self.y2, self.x2 = np.mgrid[:10, :8] def test_input1D(self, model_class, test_parameters): """Test model with different input types.""" model = create_model(model_class, test_parameters) model(self.x) model(self.x1) model(self.x2) def test_eval1D(self, model_class, test_parameters): """ Test model values at certain given points """ model = create_model(model_class, test_parameters) x = test_parameters['x_values'] y = test_parameters['y_values'] utils.assert_allclose(model(x), y, atol=self.eval_error) def test_bounding_box1D(self, model_class, test_parameters): """Test bounding box evaluation""" model = create_model(model_class, test_parameters) # testing setter model.bounding_box = (-5, 5) model.bounding_box = None with pytest.raises(NotImplementedError): model.bounding_box del model.bounding_box # test exception if dimensions don't match with pytest.raises(ValueError): model.bounding_box = 5 try: bbox = model.bounding_box except NotImplementedError: pytest.skip("Bounding_box is not defined for model.") if isinstance(model, models.Lorentz1D): rtol = 0.01 # 1% agreement is enough due to very extended wings ddx = 0.1 # Finer sampling to "integrate" flux for narrow peak else: rtol = 1e-7 ddx = 1 dx = np.diff(bbox) / 2 x1 = np.mgrid[slice(bbox[0], bbox[1] + 1, ddx)] x2 = np.mgrid[slice(bbox[0] - dx, bbox[1] + dx + 1, ddx)] arr = model(x2) sub_arr = model(x1) # check for flux agreement assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * rtol @pytest.mark.skipif('not HAS_SCIPY') def test_fitter1D(self, model_class, test_parameters): """ Test if the parametric model works with the fitter. """ x_lim = test_parameters['x_lim'] parameters = test_parameters['parameters'] model = create_model(model_class, test_parameters) if isinstance(parameters, dict): parameters = [parameters[name] for name in model.param_names] if "log_fit" in test_parameters: if test_parameters['log_fit']: x = np.logspace(x_lim[0], x_lim[1], self.N) else: x = np.linspace(x_lim[0], x_lim[1], self.N) np.random.seed(0) # add 10% noise to the amplitude relative_noise_amplitude = 0.01 data = ((1 + relative_noise_amplitude * np.random.randn(len(x))) * model(x)) fitter = fitting.LevMarLSQFitter() new_model = fitter(model, x, data) # Only check parameters that were free in the fit params = [getattr(new_model, name) for name in new_model.param_names] fixed = [param.fixed for param in params] expected = np.array([val for val, fixed in zip(parameters, fixed) if not fixed]) fitted = np.array([param.value for param in params if not param.fixed]) utils.assert_allclose(fitted, expected, atol=self.fit_error) @pytest.mark.skipif('not HAS_SCIPY') def test_deriv_1D(self, model_class, test_parameters): """ Test the derivative of a model by comparing results with an estimated derivative. 
""" x_lim = test_parameters['x_lim'] if model_class.fit_deriv is None: pytest.skip("Derivative function is not defined for model.") if issubclass(model_class, PolynomialBase): pytest.skip("Skip testing derivative of polynomials.") if "log_fit" in test_parameters: if test_parameters['log_fit']: x = np.logspace(x_lim[0], x_lim[1], self.N) else: x = np.linspace(x_lim[0], x_lim[1], self.N) parameters = test_parameters['parameters'] model_with_deriv = create_model(model_class, test_parameters, use_constraints=False) model_no_deriv = create_model(model_class, test_parameters, use_constraints=False) # add 10% noise to the amplitude rsn = np.random.RandomState(1234567890) n = 0.1 * parameters[0] * (rsn.rand(self.N) - 0.5) data = model_with_deriv(x) + n fitter_with_deriv = fitting.LevMarLSQFitter() new_model_with_deriv = fitter_with_deriv(model_with_deriv, x, data) fitter_no_deriv = fitting.LevMarLSQFitter() new_model_no_deriv = fitter_no_deriv(model_no_deriv, x, data, estimate_jacobian=True) utils.assert_allclose(new_model_with_deriv.parameters, new_model_no_deriv.parameters, atol=0.15) def create_model(model_class, test_parameters, use_constraints=True, parameter_key='parameters'): """Create instance of model class.""" constraints = {} if issubclass(model_class, PolynomialBase): return model_class(**test_parameters[parameter_key]) elif issubclass(model_class, FittableModel): if "requires_scipy" in test_parameters and not HAS_SCIPY: pytest.skip("SciPy not found") if use_constraints: if 'constraints' in test_parameters: constraints = test_parameters['constraints'] return model_class(*test_parameters[parameter_key], **constraints) @pytest.mark.parametrize(('model_class', 'test_parameters'), sorted(models_1D.items(), key=lambda x: str(x[0]))) class TestFittable1DModels(Fittable1DModelTester): pass @pytest.mark.parametrize(('model_class', 'test_parameters'), sorted(models_2D.items(), key=lambda x: str(x[0]))) class TestFittable2DModels(Fittable2DModelTester): pass def test_ShiftModel(): # Shift by a scalar m = models.Shift(42) assert m(0) == 42 utils.assert_equal(m([1, 2]), [43, 44]) # Shift by a list m = models.Shift([42, 43], n_models=2) utils.assert_equal(m(0), [42, 43]) utils.assert_equal(m([1, 2], model_set_axis=False), [[43, 44], [44, 45]]) def test_ScaleModel(): # Scale by a scalar m = models.Scale(42) assert m(0) == 0 utils.assert_equal(m([1, 2]), [42, 84]) # Scale by a list m = models.Scale([42, 43], n_models=2) utils.assert_equal(m(0), [0, 0]) utils.assert_equal(m([1, 2], model_set_axis=False), [[42, 84], [43, 86]]) def test_voigt_model(): """ Currently just tests that the model peaks at its origin. Regression test for https://github.com/astropy/astropy/issues/3942 """ m = models.Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9) x = np.arange(0, 10, 0.01) y = m(x) assert y[500] == y.max() # y[500] is right at the center def test_model_instance_repr(): m = models.Gaussian1D(1.5, 2.5, 3.5) assert repr(m) == '<Gaussian1D(amplitude=1.5, mean=2.5, stddev=3.5)>' @pytest.mark.skipif("not HAS_SCIPY_14") def test_tabular_interp_1d(): """ Test Tabular1D model. """ points = np.arange(0, 5) values = [1., 10, 2, 45, -3] LookupTable = models.tabular_model(1) model = LookupTable(points=points, lookup_table=values) xnew = [0., .7, 1.4, 2.1, 3.9] ans1 = [1., 7.3, 6.8, 6.3, 1.8] utils.assert_allclose(model(xnew), ans1) # Test evaluate without passing `points`. model = LookupTable(lookup_table=values) utils.assert_allclose(model(xnew), ans1) # Test bounds error. 
    xextrap = [0., .7, 1.4, 2.1, 3.9, 4.1]
    with pytest.raises(ValueError):
        model(xextrap)

    # test extrapolation and fill value
    model = LookupTable(lookup_table=values, bounds_error=False,
                        fill_value=None)
    utils.assert_allclose(model(xextrap),
                          [1., 7.3, 6.8, 6.3, 1.8, -7.8])

    # Test unit support
    xnew = xnew * u.nm
    ans1 = ans1 * u.nJy
    model = LookupTable(points=points*u.nm, lookup_table=values*u.nJy)
    assert_quantity_allclose(model(xnew), ans1)
    assert_quantity_allclose(model(xnew.to(u.nm)), ans1)
    assert model.bounding_box == (0 * u.nm, 4 * u.nm)

    # Test fill value unit conversion and unitless input on table with unit
    model = LookupTable([1, 2, 3], [10, 20, 30] * u.nJy, bounds_error=False,
                        fill_value=1e-33*(u.W / (u.m * u.m * u.Hz)))
    assert_quantity_allclose(model(np.arange(5)),
                             [100, 10, 20, 30, 100] * u.nJy)


@pytest.mark.skipif("not HAS_SCIPY_14")
def test_tabular_interp_2d():
    table = np.array([
        [-0.04614432, -0.02512547, -0.00619557, 0.0144165, 0.0297525],
        [-0.04510594, -0.03183369, -0.01118008, 0.01201388, 0.02496205],
        [-0.05464094, -0.02804499, -0.00960086, 0.01134333, 0.02284104],
        [-0.04879338, -0.02539565, -0.00440462, 0.01795145, 0.02122417],
        [-0.03637372, -0.01630025, -0.00157902, 0.01649774, 0.01952131]])

    points = np.arange(0, 5)
    points = (points, points)
    xnew = np.array([0., .7, 1.4, 2.1, 3.9])
    LookupTable = models.tabular_model(2)
    model = LookupTable(points, table)
    znew = model(xnew, xnew)
    result = np.array(
        [-0.04614432, -0.03450009, -0.02241028, -0.0069727, 0.01938675])
    utils.assert_allclose(znew, result, atol=1e-7)

    # test 2D arrays as input
    a = np.arange(12).reshape((3, 4))
    y, x = np.mgrid[:3, :4]
    t = models.Tabular2D(lookup_table=a)
    r = t(y, x)
    utils.assert_allclose(a, r)

    with pytest.raises(ValueError):
        model = LookupTable(points=([1.2, 2.3], [1.2, 6.7], [3, 4]))
    with pytest.raises(ValueError):
        model = LookupTable(lookup_table=[1, 2, 3])
    with pytest.raises(NotImplementedError):
        model = LookupTable(n_models=2)
    with pytest.raises(ValueError):
        model = LookupTable(([1, 2], [3, 4]), [5, 6])
    with pytest.raises(ValueError):
        model = LookupTable(([1, 2] * u.m, [3, 4]), [[5, 6], [7, 8]])
    with pytest.raises(ValueError):
        model = LookupTable(points, table, bounds_error=False,
                            fill_value=1*u.Jy)

    # Test unit support
    points = points[0] * u.nm
    points = (points, points)
    xnew = xnew * u.nm
    model = LookupTable(points, table * u.nJy)
    result = result * u.nJy
    assert_quantity_allclose(model(xnew, xnew), result, atol=1e-7*u.nJy)
    xnew = xnew.to(u.m)
    assert_quantity_allclose(model(xnew, xnew), result, atol=1e-7*u.nJy)
    bbox = (0 * u.nm, 4 * u.nm)
    bbox = (bbox, bbox)
    assert model.bounding_box == bbox


@pytest.mark.skipif("not HAS_SCIPY_14")
def test_tabular_nd():
    a = np.arange(24).reshape((2, 3, 4))
    x, y, z = np.mgrid[:2, :3, :4]
    tab = models.tabular_model(3)
    t = tab(lookup_table=a)
    result = t(x, y, z)
    utils.assert_allclose(a, result)

    with pytest.raises(ValueError):
        models.tabular_model(0)


def test_with_bounding_box():
    """
    Test the option to evaluate a model respecting its bounding_box.
""" p = models.Polynomial2D(2) & models.Polynomial2D(2) m = models.Mapping((0, 1, 0, 1)) | p with NumpyRNGContext(1234567): m.parameters = np.random.rand(12) m.bounding_box = ((3, 9), (1, 8)) x, y = np.mgrid[:10, :10] a, b = m(x, y) aw, bw = m(x, y, with_bounding_box=True) ind = (~np.isnan(aw)).nonzero() utils.assert_allclose(a[ind], aw[ind]) utils.assert_allclose(b[ind], bw[ind]) aw, bw = m(x, y, with_bounding_box=True, fill_value=1000) ind = (aw != 1000).nonzero() utils.assert_allclose(a[ind], aw[ind]) utils.assert_allclose(b[ind], bw[ind]) # test the order of bbox is not reversed for 1D models p = models.Polynomial1D(1, c0=12, c1=2.3) p.bounding_box = (0, 5) assert(p(1) == p(1, with_bounding_box=True))
9ce65d2de3baf99093fb64060977faed7ea90ec8b9757e1e121dd92661b36255
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import utils

from ...wcs import wcs
from .. import models
from ... import units as u
from ...tests.helper import assert_quantity_allclose


@pytest.mark.parametrize(('inp'), [(0, 0), (4000, -20.56), (-2001.5, 45.9),
                                   (0, 90), (0, -90), (np.mgrid[:4, :6])])
def test_against_wcslib(inp):
    w = wcs.WCS()
    crval = [202.4823228, 47.17511893]
    w.wcs.crval = crval
    w.wcs.ctype = ['RA---TAN', 'DEC--TAN']

    lonpole = 180
    tan = models.Pix2Sky_TAN()
    n2c = models.RotateNative2Celestial(crval[0] * u.deg, crval[1] * u.deg,
                                        lonpole * u.deg)
    c2n = models.RotateCelestial2Native(crval[0] * u.deg, crval[1] * u.deg,
                                        lonpole * u.deg)
    m = tan | n2c
    minv = c2n | tan.inverse

    radec = w.wcs_pix2world(inp[0], inp[1], 1)
    xy = w.wcs_world2pix(radec[0], radec[1], 1)

    utils.assert_allclose(m(*inp), radec, atol=1e-12)
    utils.assert_allclose(minv(*radec), xy, atol=1e-12)


@pytest.mark.parametrize(('inp'), [(40 * u.deg, -0.057 * u.rad),
                                   (21.5 * u.arcsec, 45.9 * u.deg)])
def test_roundtrip_sky_rotation(inp):
    lon, lat, lon_pole = 42 * u.deg, (43 * u.deg).to(u.arcsec), (44 * u.deg).to(u.rad)
    n2c = models.RotateNative2Celestial(lon, lat, lon_pole)
    c2n = models.RotateCelestial2Native(lon, lat, lon_pole)
    assert_quantity_allclose(n2c.inverse(*n2c(*inp)), inp, atol=1e-13 * u.deg)
    assert_quantity_allclose(c2n.inverse(*c2n(*inp)), inp, atol=1e-13 * u.deg)


def test_Rotation2D():
    model = models.Rotation2D(angle=90 * u.deg)
    a, b = 1 * u.deg, 0 * u.deg
    x, y = model(a, b)
    assert_quantity_allclose([x, y], [0 * u.deg, 1 * u.deg],
                             atol=1e-10 * u.deg)


def test_Rotation2D_inverse():
    model = models.Rotation2D(angle=234.23494 * u.deg)
    x, y = model.inverse(*model(1 * u.deg, 0 * u.deg))
    assert_quantity_allclose([x, y], [1 * u.deg, 0 * u.deg],
                             atol=1e-10 * u.deg)


def test_euler_angle_rotations():
    ydeg = (90 * u.deg, 0 * u.deg)
    y = (90, 0)
    z = (0, 90)

    # rotate y into minus z
    model = models.EulerAngleRotation(0 * u.rad, np.pi / 2 * u.rad,
                                      0 * u.rad, 'zxz')
    utils.assert_allclose(model(*z), y, atol=10**-12)
    model = models.EulerAngleRotation(0 * u.deg, 90 * u.deg,
                                      0 * u.deg, 'zxz')
    assert_quantity_allclose(model(*(z * u.deg)), ydeg, atol=10**-12 * u.deg)


@pytest.mark.parametrize(('params'), [(60, 10, 25),
                                      (60 * u.deg, 10 * u.deg, 25 * u.deg),
                                      ((60 * u.deg).to(u.rad),
                                       (10 * u.deg).to(u.rad),
                                       (25 * u.deg).to(u.rad))])
def test_euler_rotations_with_units(params):
    x = 1 * u.deg
    y = 1 * u.deg
    phi, theta, psi = params

    urot = models.EulerAngleRotation(phi, theta, psi, axes_order='xyz')
    a, b = urot(x.value, y.value)
    utils.assert_allclose((a, b), (-23.614457631192547, 9.631254579686113))

    a, b = urot(x, y)
    assert_quantity_allclose((a, b), (-23.614457631192547 * u.deg,
                                      9.631254579686113 * u.deg))

    a, b = urot(x.to(u.rad), y.to(u.rad))
    assert_quantity_allclose((a, b), (-23.614457631192547 * u.deg,
                                      9.631254579686113 * u.deg))


def test_attributes():
    n2c = models.RotateNative2Celestial(20016 * u.arcsec, -72.3 * u.deg,
                                        np.pi * u.rad)
    utils.assert_allclose(n2c.lat.value, -72.3)
    utils.assert_allclose(n2c.lat._raw_value, -1.2618730491919001)
    utils.assert_allclose(n2c.lon.value, 20016)
    utils.assert_allclose(n2c.lon._raw_value, 0.09704030641088472)
    utils.assert_allclose(n2c.lon_pole.value, np.pi)
    utils.assert_allclose(n2c.lon_pole._raw_value, np.pi)
    assert(n2c.lon.unit is u.Unit("arcsec"))
    assert(n2c._param_metrics['lon']['raw_unit'] is u.Unit("rad"))
    assert(n2c.lat.unit is u.Unit("deg"))
    assert(n2c._param_metrics['lat']['raw_unit'] is u.Unit("rad"))
assert(n2c.lon_pole.unit is u.Unit("rad")) assert(n2c._param_metrics['lon_pole']['raw_unit'] is u.Unit("rad"))
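

# --- Illustrative sketch, not part of the original test module ---
# The Rotation2D cases above are easiest to sanity-check against the
# underlying 2x2 rotation matrix, written out with plain numpy. This is a
# hedged illustration of the math, not astropy's implementation.
def _rotate2d(x, y, angle_deg):
    a = np.deg2rad(angle_deg)
    rot = np.array([[np.cos(a), -np.sin(a)],
                    [np.sin(a), np.cos(a)]])
    return rot @ np.array([x, y])


# Rotating (1, 0) by 90 degrees gives (0, 1), matching test_Rotation2D
# (up to floating-point round-off in the first component).
assert np.allclose(_rotate2d(1.0, 0.0, 90.0), [0.0, 1.0])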
828454abf5598806df52f18316004d61bc79ddf216f169b6001a8f9a8c52ff2b
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Tests that relate to evaluating models with quantity parameters """ import numpy as np import pytest from numpy.testing import assert_allclose from ..core import Model from ..models import Gaussian1D from ... import units as u from ...units import UnitsError from ...tests.helper import assert_quantity_allclose # We start off by taking some simple cases where the units are defined by # whatever the model is initialized with, and we check that the model evaluation # returns quantities. def test_evaluate_with_quantities(): """ Test evaluation of a single model with Quantity parameters that do not explicitly require units. """ # We create two models here - one with quantities, and one without. The one # without is used to create the reference values for comparison. g = Gaussian1D(1, 1, 0.1) gq = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m) # We first check that calling the Gaussian with quantities returns the # expected result assert_quantity_allclose(gq(1 * u.m), g(1) * u.J) # Units have to be specified for the Gaussian with quantities - if not, an # error is raised with pytest.raises(UnitsError) as exc: gq(1) assert exc.value.args[0] == ("Units of input 'x', (dimensionless), could not be " "converted to required input units of m (length)") # However, zero is a special case assert_quantity_allclose(gq(0), g(0) * u.J) # We can also evaluate models with equivalent units assert_allclose(gq(0.0005 * u.km).value, g(0.5)) # But not with incompatible units with pytest.raises(UnitsError) as exc: gq(3 * u.s) assert exc.value.args[0] == ("Units of input 'x', s (time), could not be " "converted to required input units of m (length)") # We also can't evaluate the model without quantities with a quantity with pytest.raises(UnitsError) as exc: g(3 * u.m) # TODO: determine what error message should be here # assert exc.value.args[0] == ("Units of input 'x', m (length), could not be " # "converted to required dimensionless input") def test_evaluate_with_quantities_and_equivalencies(): """ We now make sure that equivalencies are correctly taken into account """ g = Gaussian1D(1 * u.Jy, 10 * u.nm, 2 * u.nm) # We aren't setting the equivalencies, so this won't work with pytest.raises(UnitsError) as exc: g(30 * u.PHz) assert exc.value.args[0] == ("Units of input 'x', PHz (frequency), could " "not be converted to required input units of " "nm (length)") # But it should now work if we pass equivalencies when evaluating assert_quantity_allclose(g(30 * u.PHz, equivalencies={'x': u.spectral()}), g(9.993081933333332 * u.nm)) class MyTestModel(Model): inputs = ('a', 'b') outputs = ('f',) def evaluate(self, a, b): print('a', a) print('b', b) return a * b class TestInputUnits(): def setup_method(self, method): self.model = MyTestModel() def test_evaluate(self): # We should be able to evaluate with anything assert_quantity_allclose(self.model(3, 5), 15) assert_quantity_allclose(self.model(4 * u.m, 5), 20 * u.m) assert_quantity_allclose(self.model(3 * u.deg, 5), 15 * u.deg) def test_input_units(self): self.model.input_units = {'a': u.deg} assert_quantity_allclose(self.model(3 * u.deg, 4), 12 * u.deg) assert_quantity_allclose(self.model(4 * u.rad, 2), 8 * u.rad) assert_quantity_allclose(self.model(4 * u.rad, 2 * u.s), 8 * u.rad * u.s) with pytest.raises(UnitsError) as exc: self.model(4 * u.s, 3) assert exc.value.args[0] == ("Units of input 'a', s (time), could not be " "converted to required input units of deg (angle)") with pytest.raises(UnitsError) as exc: 
self.model(3, 3) assert exc.value.args[0] == ("Units of input 'a', (dimensionless), could " "not be converted to required input units of deg (angle)") def test_input_units_allow_dimensionless(self): self.model.input_units = {'a': u.deg} self.model.input_units_allow_dimensionless = True assert_quantity_allclose(self.model(3 * u.deg, 4), 12 * u.deg) assert_quantity_allclose(self.model(4 * u.rad, 2), 8 * u.rad) with pytest.raises(UnitsError) as exc: self.model(4 * u.s, 3) assert exc.value.args[0] == ("Units of input 'a', s (time), could not be " "converted to required input units of deg (angle)") assert_quantity_allclose(self.model(3, 3), 9) def test_input_units_strict(self): self.model.input_units = {'a': u.deg} self.model.input_units_strict = True assert_quantity_allclose(self.model(3 * u.deg, 4), 12 * u.deg) result = self.model(np.pi * u.rad, 2) assert_quantity_allclose(result, 360 * u.deg) assert result.unit is u.deg def test_input_units_equivalencies(self): self.model.input_units = {'a': u.micron} with pytest.raises(UnitsError) as exc: self.model(3 * u.PHz, 3) assert exc.value.args[0] == ("Units of input 'a', PHz (frequency), could " "not be converted to required input units of " "micron (length)") self.model.input_units_equivalencies = {'a': u.spectral()} assert_quantity_allclose(self.model(3 * u.PHz, 3), 3 * (3 * u.PHz).to(u.micron, equivalencies=u.spectral())) def test_return_units(self): self.model.input_units = {'a': u.deg} self.model.return_units = {'f': u.rad} result = self.model(3 * u.deg, 4) assert_quantity_allclose(result, 12 * u.deg) assert result.unit is u.rad def test_return_units_scalar(self): # Check that return_units also works when giving a single unit since # there is only one output, so is unambiguous. self.model.input_units = {'a': u.deg} self.model.return_units = u.rad result = self.model(3 * u.deg, 4) assert_quantity_allclose(result, 12 * u.deg) assert result.unit is u.rad
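

# --- Illustrative sketch, not part of the original test module ---
# The pattern these tests exercise boils down to: convert each input to
# the declared input unit (optionally via equivalencies) and raise
# UnitsError when that is impossible. ``_check_input`` is a hypothetical
# helper sketching that pattern with astropy.units directly; it is not
# the code path Model actually uses.
def _check_input(value, required, equivalencies=None):
    if isinstance(value, u.Quantity):
        # Raises a UnitsError subclass if the units are not convertible.
        return value.to(required, equivalencies=equivalencies or [])
    raise u.UnitsError("input is dimensionless but '{0}' is required"
                       .format(required))


assert_quantity_allclose(_check_input(np.pi * u.rad, u.deg), 180 * u.deg)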
17d0e5f146f144cda41b9d4cfa35a4afc4aa8d385b9203a5019cb06ae2763e28
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Tests that relate to fitting models with quantity parameters """ import numpy as np import pytest from ..models import Gaussian1D from ... import units as u from ...units import UnitsError from ...tests.helper import assert_quantity_allclose from ...utils import NumpyRNGContext from .. import fitting try: from scipy import optimize HAS_SCIPY = True except ImportError: HAS_SCIPY = False # Fitting should be as intuitive as possible to the user. Essentially, models # and fitting should work without units, but if one has units, the other should # have units too, and the resulting fitted parameters will also have units. def _fake_gaussian_data(): # Generate fake data with NumpyRNGContext(12345): x = np.linspace(-5., 5., 2000) y = 3 * np.exp(-0.5 * (x - 1.3)**2 / 0.8**2) y += np.random.normal(0., 0.2, x.shape) # Attach units to data x = x * u.m y = y * u.Jy return x, y @pytest.mark.skipif('not HAS_SCIPY') def test_fitting_simple(): x, y = _fake_gaussian_data() # Fit the data using a Gaussian with units g_init = Gaussian1D() fit_g = fitting.LevMarLSQFitter() g = fit_g(g_init, x, y) # TODO: update actual numerical results once implemented, but these should # be close to the values below. assert_quantity_allclose(g.amplitude, 3 * u.Jy, rtol=0.05) assert_quantity_allclose(g.mean, 1.3 * u.m, rtol=0.05) assert_quantity_allclose(g.stddev, 0.8 * u.m, rtol=0.05) @pytest.mark.skipif('not HAS_SCIPY') def test_fitting_with_initial_values(): x, y = _fake_gaussian_data() # Fit the data using a Gaussian with units g_init = Gaussian1D(amplitude=1. * u.mJy, mean=3 * u.cm, stddev=2 * u.mm) fit_g = fitting.LevMarLSQFitter() g = fit_g(g_init, x, y) # TODO: update actual numerical results once implemented, but these should # be close to the values below. assert_quantity_allclose(g.amplitude, 3 * u.Jy, rtol=0.05) assert_quantity_allclose(g.mean, 1.3 * u.m, rtol=0.05) assert_quantity_allclose(g.stddev, 0.8 * u.m, rtol=0.05) @pytest.mark.skipif('not HAS_SCIPY') def test_fitting_missing_data_units(): """ Raise an error if the model has units but the data doesn't """ g_init = Gaussian1D(amplitude=1. * u.mJy, mean=3 * u.cm, stddev=2 * u.mm) fit_g = fitting.LevMarLSQFitter() with pytest.raises(UnitsError) as exc: fit_g(g_init, [1, 2, 3], [4, 5, 6]) assert exc.value.args[0] == ("'cm' (length) and '' (dimensionless) are not " "convertible") with pytest.raises(UnitsError) as exc: fit_g(g_init, [1, 2, 3] * u.m, [4, 5, 6]) assert exc.value.args[0] == ("'mJy' (spectral flux density) and '' " "(dimensionless) are not convertible") @pytest.mark.skipif('not HAS_SCIPY') def test_fitting_missing_model_units(): """ Proceed if the data has units but the model doesn't """ x, y = _fake_gaussian_data() g_init = Gaussian1D(amplitude=1., mean=3, stddev=2) fit_g = fitting.LevMarLSQFitter() g = fit_g(g_init, x, y) assert_quantity_allclose(g.amplitude, 3 * u.Jy, rtol=0.05) assert_quantity_allclose(g.mean, 1.3 * u.m, rtol=0.05) assert_quantity_allclose(g.stddev, 0.8 * u.m, rtol=0.05) g_init = Gaussian1D(amplitude=1., mean=3 * u.m, stddev=2 * u.m) fit_g = fitting.LevMarLSQFitter() g = fit_g(g_init, x, y) assert_quantity_allclose(g.amplitude, 3 * u.Jy, rtol=0.05) assert_quantity_allclose(g.mean, 1.3 * u.m, rtol=0.05) assert_quantity_allclose(g.stddev, 0.8 * u.m, rtol=0.05) @pytest.mark.skipif('not HAS_SCIPY') def test_fitting_incompatible_units(): """ Raise an error if the data and model have incompatible units """ g_init = Gaussian1D(amplitude=1. 
* u.Jy, mean=3 * u.m, stddev=2 * u.cm) fit_g = fitting.LevMarLSQFitter() with pytest.raises(UnitsError) as exc: fit_g(g_init, [1, 2, 3] * u.Hz, [4, 5, 6] * u.Jy) assert exc.value.args[0] == ("'Hz' (frequency) and 'm' (length) are not convertible")
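

# --- Illustrative sketch, not part of the original test module ---
# One hedged way to think about units-aware fitting (not necessarily how
# the fitters implement it): convert the data into the units the initial
# model uses, strip them, run an ordinary least-squares fit, and re-attach
# the units to the best-fit parameters. ``_strip_units_for_fit`` is a
# hypothetical helper.
def _strip_units_for_fit(x, y, x_unit=u.m, y_unit=u.Jy):
    # to_value raises a UnitsError subclass for incompatible units,
    # mirroring the behaviour checked in test_fitting_incompatible_units.
    return x.to_value(x_unit), y.to_value(y_unit)


_xq, _yq = _fake_gaussian_data()
_xp, _yp = _strip_units_for_fit(_xq, _yq)
# (_xp, _yp) are now plain float arrays; after fitting, the amplitude
# would be re-attached as value * u.Jy and mean/stddev as value * u.m.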
5155557de111856bdfb43c1934063f6f4b596f8c433891c85c8694209fe92d44
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module tests fitting and model evaluation with various inputs """ import pytest import numpy as np from numpy.testing.utils import assert_allclose from .. import models from .. import fitting from ..core import Model, FittableModel, Fittable1DModel from ..parameters import Parameter try: from scipy import optimize # pylint: disable=W0611 HAS_SCIPY = True except ImportError: HAS_SCIPY = False model1d_params = [ (models.Polynomial1D, [2]), (models.Legendre1D, [2]), (models.Chebyshev1D, [2]), (models.Shift, [2]), (models.Scale, [2]) ] model2d_params = [ (models.Polynomial2D, [2]), (models.Legendre2D, [1, 2]), (models.Chebyshev2D, [1, 2]) ] class TestInputType: """ This class tests that models accept numbers, lists and arrays. Add new models to one of the lists above to test for this. """ def setup_class(self): self.x = 5.3 self.y = 6.7 self.x1 = np.arange(1, 10, .1) self.y1 = np.arange(1, 10, .1) self.y2, self.x2 = np.mgrid[:10, :8] @pytest.mark.parametrize(('model', 'params'), model1d_params) def test_input1D(self, model, params): m = model(*params) m(self.x) m(self.x1) m(self.x2) @pytest.mark.parametrize(('model', 'params'), model2d_params) def test_input2D(self, model, params): m = model(*params) m(self.x, self.y) m(self.x1, self.y1) m(self.x2, self.y2) class TestFitting: """Test various input options to fitting routines.""" def setup_class(self): self.x1 = np.arange(10) self.y, self.x = np.mgrid[:10, :10] def test_linear_fitter_1set(self): """1 set 1D x, 1pset""" expected = np.array([0, 1, 1, 1]) p1 = models.Polynomial1D(3) p1.parameters = [0, 1, 1, 1] y1 = p1(self.x1) pfit = fitting.LinearLSQFitter() model = pfit(p1, self.x1, y1) assert_allclose(model.parameters, expected, atol=10 ** (-7)) def test_linear_fitter_Nset(self): """1 set 1D x, 2 sets 1D y, 2 param_sets""" expected = np.array([[0, 0], [1, 1], [2, 2], [3, 3]]) p1 = models.Polynomial1D(3, n_models=2) p1.parameters = [0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0] params = {} for i in range(4): params[p1.param_names[i]] = [i, i] p1 = models.Polynomial1D(3, model_set_axis=0, **params) y1 = p1(self.x1, model_set_axis=False) pfit = fitting.LinearLSQFitter() model = pfit(p1, self.x1, y1) assert_allclose(model.param_sets, expected, atol=10 ** (-7)) def test_linear_fitter_1dcheb(self): """1 pset, 1 set 1D x, 1 set 1D y, Chebyshev 1D polynomial""" expected = np.array( [[2817.2499999999995, 4226.6249999999991, 1680.7500000000009, 273.37499999999926]]).T ch1 = models.Chebyshev1D(3) ch1.parameters = [0, 1, 2, 3] y1 = ch1(self.x1) pfit = fitting.LinearLSQFitter() model = pfit(ch1, self.x1, y1) assert_allclose(model.param_sets, expected, atol=10 ** (-2)) def test_linear_fitter_1dlegend(self): """ 1 pset, 1 set 1D x, 1 set 1D y, Legendre 1D polynomial """ expected = np.array( [[1925.5000000000011, 3444.7500000000005, 1883.2500000000014, 364.4999999999996]]).T leg1 = models.Legendre1D(3) leg1.parameters = [1, 2, 3, 4] y1 = leg1(self.x1) pfit = fitting.LinearLSQFitter() model = pfit(leg1, self.x1, y1) assert_allclose(model.param_sets, expected, atol=10 ** (-12)) def test_linear_fitter_1set2d(self): p2 = models.Polynomial2D(2) p2.parameters = [0, 1, 2, 3, 4, 5] expected = [0, 1, 2, 3, 4, 5] z = p2(self.x, self.y) pfit = fitting.LinearLSQFitter() model = pfit(p2, self.x, self.y, z) assert_allclose(model.parameters, expected, atol=10 ** (-12)) assert_allclose(model(self.x, self.y), z, atol=10 ** (-12)) def test_wrong_numpset(self): """ A ValueError is raised if a 1 data set (1d x, 
1d y) is fit with a model with multiple parameter sets. """ with pytest.raises(ValueError): p1 = models.Polynomial1D(5) y1 = p1(self.x1) p1 = models.Polynomial1D(5, n_models=2) pfit = fitting.LinearLSQFitter() model = pfit(p1, self.x1, y1) def test_wrong_pset(self): """A case of 1 set of x and multiple sets of y and parameters.""" expected = np.array([[1., 0], [1, 1], [1, 2], [1, 3], [1, 4], [1, 5]]) p1 = models.Polynomial1D(5, n_models=2) params = {} for i in range(6): params[p1.param_names[i]] = [1, i] p1 = models.Polynomial1D(5, model_set_axis=0, **params) y1 = p1(self.x1, model_set_axis=False) pfit = fitting.LinearLSQFitter() model = pfit(p1, self.x1, y1) assert_allclose(model.param_sets, expected, atol=10 ** (-7)) @pytest.mark.skipif('not HAS_SCIPY') def test_nonlinear_lsqt_1set_1d(self): """1 set 1D x, 1 set 1D y, 1 pset NonLinearFitter""" g1 = models.Gaussian1D(10, mean=3, stddev=.2) y1 = g1(self.x1) gfit = fitting.LevMarLSQFitter() model = gfit(g1, self.x1, y1) assert_allclose(model.parameters, [10, 3, .2]) @pytest.mark.skipif('not HAS_SCIPY') def test_nonlinear_lsqt_Nset_1d(self): """1 set 1D x, 1 set 1D y, 2 param_sets, NonLinearFitter""" with pytest.raises(ValueError): g1 = models.Gaussian1D([10.2, 10], mean=[3, 3.2], stddev=[.23, .2], n_models=2) y1 = g1(self.x1, model_set_axis=False) gfit = fitting.LevMarLSQFitter() model = gfit(g1, self.x1, y1) @pytest.mark.skipif('not HAS_SCIPY') def test_nonlinear_lsqt_1set_2d(self): """1 set 2d x, 1set 2D y, 1 pset, NonLinearFitter""" g2 = models.Gaussian2D(10, x_mean=3, y_mean=4, x_stddev=.3, y_stddev=.2, theta=0) z = g2(self.x, self.y) gfit = fitting.LevMarLSQFitter() model = gfit(g2, self.x, self.y, z) assert_allclose(model.parameters, [10, 3, 4, .3, .2, 0]) @pytest.mark.skipif('not HAS_SCIPY') def test_nonlinear_lsqt_Nset_2d(self): """1 set 2d x, 1set 2D y, 2 param_sets, NonLinearFitter""" with pytest.raises(ValueError): g2 = models.Gaussian2D([10, 10], [3, 3], [4, 4], x_stddev=[.3, .3], y_stddev=[.2, .2], theta=[0, 0], n_models=2) z = g2(self.x.flatten(), self.y.flatten()) gfit = fitting.LevMarLSQFitter() model = gfit(g2, self.x, self.y, z) class TestEvaluation: """ Test various input options to model evaluation TestFitting actually covers evaluation of polynomials """ def setup_class(self): self.x1 = np.arange(20) self.y, self.x = np.mgrid[:10, :10] def test_non_linear_NYset(self): """ This case covers: N param sets , 1 set 1D x --> N 1D y data """ g1 = models.Gaussian1D([10, 10], [3, 3], [.2, .2], n_models=2) y1 = g1(self.x1, model_set_axis=False) assert np.all((y1[0, :] - y1[1, :]).nonzero() == np.array([])) def test_non_linear_NXYset(self): """ This case covers: N param sets , N sets 1D x --> N N sets 1D y data """ g1 = models.Gaussian1D([10, 10], [3, 3], [.2, .2], n_models=2) xx = np.array([self.x1, self.x1]) y1 = g1(xx) assert_allclose(y1[:, 0], y1[:, 1], atol=10 ** (-12)) def test_p1_1set_1pset(self): """1 data set, 1 pset, Polynomial1D""" p1 = models.Polynomial1D(4) y1 = p1(self.x1) assert y1.shape == (20,) def test_p1_nset_npset(self): """N data sets, N param_sets, Polynomial1D""" p1 = models.Polynomial1D(4, n_models=2) y1 = p1(np.array([self.x1, self.x1]).T, model_set_axis=-1) assert y1.shape == (2, 20) assert_allclose(y1[0, :], y1[1, :], atol=10 ** (-12)) def test_p2_1set_1pset(self): """1 pset, 1 2D data set, Polynomial2D""" p2 = models.Polynomial2D(5) z = p2(self.x, self.y) assert z.shape == (10, 10) def test_p2_nset_npset(self): """N param_sets, N 2D data sets, Poly2d""" p2 = models.Polynomial2D(5, n_models=2) xx = 
np.array([self.x, self.x]) yy = np.array([self.y, self.y]) z = p2(xx, yy) assert z.shape == (2, 10, 10) def test_nset_domain(self): """ Polynomial evaluation of multiple data sets with different domain """ xx = np.array([self.x1, self.x1]).T xx[0, 0] = 100 xx[1, 0] = 100 xx[2, 0] = 99 p1 = models.Polynomial1D(5, n_models=2) yy = p1(xx, model_set_axis=-1) x1 = xx[:, 0] x2 = xx[:, 1] p1 = models.Polynomial1D(5) assert_allclose(p1(x1), yy[0, :], atol=10 ** (-12)) p1 = models.Polynomial1D(5) assert_allclose(p1(x2), yy[1, :], atol=10 ** (-12)) def test_evaluate_gauss2d(self): cov = np.array([[1., 0.8], [0.8, 3]]) g = models.Gaussian2D(1., 5., 4., cov_matrix=cov) y, x = np.mgrid[:10, :10] g(x, y) class TModel_1_1(Fittable1DModel): p1 = Parameter() p2 = Parameter() @staticmethod def evaluate(x, p1, p2): return x + p1 + p2 class TestSingleInputSingleOutputSingleModel: """ A suite of tests to check various cases of parameter and input combinations on models with n_input = n_output = 1 on a toy model with n_models=1. Many of these tests mirror test cases in ``astropy.modeling.tests.test_parameters.TestParameterInitialization``, except that this tests how different parameter arrangements interact with different types of model inputs. """ def test_scalar_parameters_scalar_input(self): """ Scalar parameters with a scalar input should return a scalar. """ t = TModel_1_1(1, 10) y = t(100) assert isinstance(y, float) assert np.ndim(y) == 0 assert y == 111 def test_scalar_parameters_1d_array_input(self): """ Scalar parameters should broadcast with an array input to result in an array output of the same shape as the input. """ t = TModel_1_1(1, 10) y = t(np.arange(5) * 100) assert isinstance(y, np.ndarray) assert np.shape(y) == (5,) assert np.all(y == [11, 111, 211, 311, 411]) def test_scalar_parameters_2d_array_input(self): """ Scalar parameters should broadcast with an array input to result in an array output of the same shape as the input. """ t = TModel_1_1(1, 10) y = t(np.arange(6).reshape(2, 3) * 100) assert isinstance(y, np.ndarray) assert np.shape(y) == (2, 3) assert np.all(y == [[11, 111, 211], [311, 411, 511]]) def test_scalar_parameters_3d_array_input(self): """ Scalar parameters should broadcast with an array input to result in an array output of the same shape as the input. """ t = TModel_1_1(1, 10) y = t(np.arange(12).reshape(2, 3, 2) * 100) assert isinstance(y, np.ndarray) assert np.shape(y) == (2, 3, 2) assert np.all(y == [[[11, 111], [211, 311], [411, 511]], [[611, 711], [811, 911], [1011, 1111]]]) def test_1d_array_parameters_scalar_input(self): """ Array parameters should all be broadcastable with each other, and with a scalar input the output should be broadcast to the maximum dimensions of the parameters. """ t = TModel_1_1([1, 2], [10, 20]) y = t(100) assert isinstance(y, np.ndarray) assert np.shape(y) == (2,) assert np.all(y == [111, 122]) def test_1d_array_parameters_1d_array_input(self): """ When given an array input it must be broadcastable with all the parameters. """ t = TModel_1_1([1, 2], [10, 20]) y1 = t([100, 200]) assert np.shape(y1) == (2,) assert np.all(y1 == [111, 222]) y2 = t([[100], [200]]) assert np.shape(y2) == (2, 2) assert np.all(y2 == [[111, 122], [211, 222]]) with pytest.raises(ValueError): # Doesn't broadcast y3 = t([100, 200, 300]) def test_2d_array_parameters_2d_array_input(self): """ When given an array input it must be broadcastable with all the parameters. 
""" t = TModel_1_1([[1, 2], [3, 4]], [[10, 20], [30, 40]]) y1 = t([[100, 200], [300, 400]]) assert np.shape(y1) == (2, 2) assert np.all(y1 == [[111, 222], [333, 444]]) y2 = t([[[[100]], [[200]]], [[[300]], [[400]]]]) assert np.shape(y2) == (2, 2, 2, 2) assert np.all(y2 == [[[[111, 122], [133, 144]], [[211, 222], [233, 244]]], [[[311, 322], [333, 344]], [[411, 422], [433, 444]]]]) with pytest.raises(ValueError): # Doesn't broadcast y3 = t([[100, 200, 300], [400, 500, 600]]) def test_mixed_array_parameters_1d_array_input(self): """ When given an array input it must be broadcastable with all the parameters. """ t = TModel_1_1([[[0.01, 0.02, 0.03], [0.04, 0.05, 0.06]], [[0.07, 0.08, 0.09], [0.10, 0.11, 0.12]]], [1, 2, 3]) y1 = t([10, 20, 30]) assert np.shape(y1) == (2, 2, 3) assert_allclose(y1, [[[11.01, 22.02, 33.03], [11.04, 22.05, 33.06]], [[11.07, 22.08, 33.09], [11.10, 22.11, 33.12]]]) y2 = t([[[[10]]], [[[20]]], [[[30]]]]) assert np.shape(y2) == (3, 2, 2, 3) assert_allclose(y2, [[[[11.01, 12.02, 13.03], [11.04, 12.05, 13.06]], [[11.07, 12.08, 13.09], [11.10, 12.11, 13.12]]], [[[21.01, 22.02, 23.03], [21.04, 22.05, 23.06]], [[21.07, 22.08, 23.09], [21.10, 22.11, 23.12]]], [[[31.01, 32.02, 33.03], [31.04, 32.05, 33.06]], [[31.07, 32.08, 33.09], [31.10, 32.11, 33.12]]]]) class TestSingleInputSingleOutputTwoModel: """ A suite of tests to check various cases of parameter and input combinations on models with n_input = n_output = 1 on a toy model with n_models=2. Many of these tests mirror test cases in ``astropy.modeling.tests.test_parameters.TestParameterInitialization``, except that this tests how different parameter arrangements interact with different types of model inputs. With n_models=2 all outputs should have a first dimension of size 2 (unless defined with model_set_axis != 0). """ def test_scalar_parameters_scalar_input(self): """ Scalar parameters with a scalar input should return a 1-D array with size equal to the number of models. """ t = TModel_1_1([1, 2], [10, 20], n_models=2) y = t(100) assert np.shape(y) == (2,) assert np.all(y == [111, 122]) def test_scalar_parameters_1d_array_input(self): """ The dimension of the input should match the number of models unless model_set_axis=False is given, in which case the input is copied across all models. """ t = TModel_1_1([1, 2], [10, 20], n_models=2) with pytest.raises(ValueError): y = t(np.arange(5) * 100) y1 = t([100, 200]) assert np.shape(y1) == (2,) assert np.all(y1 == [111, 222]) y2 = t([100, 200], model_set_axis=False) # In this case the value [100, 200, 300] should be evaluated on each # model rather than evaluating the first model with 100 and the second # model with 200 assert np.shape(y2) == (2, 2) assert np.all(y2 == [[111, 211], [122, 222]]) y3 = t([100, 200, 300], model_set_axis=False) assert np.shape(y3) == (2, 3) assert np.all(y3 == [[111, 211, 311], [122, 222, 322]]) def test_scalar_parameters_2d_array_input(self): """ The dimension of the input should match the number of models unless model_set_axis=False is given, in which case the input is copied across all models. 
""" t = TModel_1_1([1, 2], [10, 20], n_models=2) y1 = t(np.arange(6).reshape(2, 3) * 100) assert np.shape(y1) == (2, 3) assert np.all(y1 == [[11, 111, 211], [322, 422, 522]]) y2 = t(np.arange(6).reshape(2, 3) * 100, model_set_axis=False) assert np.shape(y2) == (2, 2, 3) assert np.all(y2 == [[[11, 111, 211], [311, 411, 511]], [[22, 122, 222], [322, 422, 522]]]) def test_scalar_parameters_3d_array_input(self): """ The dimension of the input should match the number of models unless model_set_axis=False is given, in which case the input is copied across all models. """ t = TModel_1_1([1, 2], [10, 20], n_models=2) data = np.arange(12).reshape(2, 3, 2) * 100 y1 = t(data) assert np.shape(y1) == (2, 3, 2) assert np.all(y1 == [[[11, 111], [211, 311], [411, 511]], [[622, 722], [822, 922], [1022, 1122]]]) y2 = t(data, model_set_axis=False) assert np.shape(y2) == (2, 2, 3, 2) assert np.all(y2 == np.array([data + 11, data + 22])) def test_1d_array_parameters_scalar_input(self): """ Array parameters should all be broadcastable with each other, and with a scalar input the output should be broadcast to the maximum dimensions of the parameters. """ t = TModel_1_1([[1, 2, 3], [4, 5, 6]], [[10, 20, 30], [40, 50, 60]], n_models=2) y = t(100) assert np.shape(y) == (2, 3) assert np.all(y == [[111, 122, 133], [144, 155, 166]]) def test_1d_array_parameters_1d_array_input(self): """ When the input is an array, if model_set_axis=False then it must broadcast with the shapes of the parameters (excluding the model_set_axis). Otherwise all dimensions must be broadcastable. """ t = TModel_1_1([[1, 2, 3], [4, 5, 6]], [[10, 20, 30], [40, 50, 60]], n_models=2) with pytest.raises(ValueError): y1 = t([100, 200, 300]) y1 = t([100, 200]) assert np.shape(y1) == (2, 3) assert np.all(y1 == [[111, 122, 133], [244, 255, 266]]) with pytest.raises(ValueError): # Doesn't broadcast with the shape of the parameters, (3,) y2 = t([100, 200], model_set_axis=False) y2 = t([100, 200, 300], model_set_axis=False) assert np.shape(y2) == (2, 3) assert np.all(y2 == [[111, 222, 333], [144, 255, 366]]) def test_2d_array_parameters_2d_array_input(self): t = TModel_1_1([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[10, 20], [30, 40]], [[50, 60], [70, 80]]], n_models=2) y1 = t([[100, 200], [300, 400]]) assert np.shape(y1) == (2, 2, 2) assert np.all(y1 == [[[111, 222], [133, 244]], [[355, 466], [377, 488]]]) with pytest.raises(ValueError): y2 = t([[100, 200, 300], [400, 500, 600]]) y2 = t([[[100, 200], [300, 400]], [[500, 600], [700, 800]]]) assert np.shape(y2) == (2, 2, 2) assert np.all(y2 == [[[111, 222], [333, 444]], [[555, 666], [777, 888]]]) def test_mixed_array_parameters_1d_array_input(self): t = TModel_1_1([[[0.01, 0.02, 0.03], [0.04, 0.05, 0.06]], [[0.07, 0.08, 0.09], [0.10, 0.11, 0.12]]], [[1, 2, 3], [4, 5, 6]], n_models=2) with pytest.raises(ValueError): y = t([10, 20, 30]) y = t([10, 20, 30], model_set_axis=False) assert np.shape(y) == (2, 2, 3) assert_allclose(y, [[[11.01, 22.02, 33.03], [11.04, 22.05, 33.06]], [[14.07, 25.08, 36.09], [14.10, 25.11, 36.12]]]) class TModel_1_2(FittableModel): inputs = ('x',) outputs = ('y', 'z') p1 = Parameter() p2 = Parameter() p3 = Parameter() @staticmethod def evaluate(x, p1, p2, p3): return (x + p1 + p2, x + p1 + p2 + p3) class TestSingleInputDoubleOutputSingleModel: """ A suite of tests to check various cases of parameter and input combinations on models with n_input = 1 but n_output = 2 on a toy model with n_models=1. 
As of writing there are not enough controls to adjust how outputs from such a model should be formatted (currently the shapes of outputs are assumed to be directly associated with the shapes of corresponding inputs when n_inputs == n_outputs). For now, the approach taken for cases like this is to assume all outputs should have the same format. """ def test_scalar_parameters_scalar_input(self): """ Scalar parameters with a scalar input should return a scalar. """ t = TModel_1_2(1, 10, 1000) y, z = t(100) assert isinstance(y, float) assert isinstance(z, float) assert np.ndim(y) == np.ndim(z) == 0 assert y == 111 assert z == 1111 def test_scalar_parameters_1d_array_input(self): """ Scalar parameters should broadcast with an array input to result in an array output of the same shape as the input. """ t = TModel_1_2(1, 10, 1000) y, z = t(np.arange(5) * 100) assert isinstance(y, np.ndarray) assert isinstance(z, np.ndarray) assert np.shape(y) == np.shape(z) == (5,) assert np.all(y == [11, 111, 211, 311, 411]) assert np.all(z == (y + 1000)) def test_scalar_parameters_2d_array_input(self): """ Scalar parameters should broadcast with an array input to result in an array output of the same shape as the input. """ t = TModel_1_2(1, 10, 1000) y, z = t(np.arange(6).reshape(2, 3) * 100) assert isinstance(y, np.ndarray) assert isinstance(z, np.ndarray) assert np.shape(y) == np.shape(z) == (2, 3) assert np.all(y == [[11, 111, 211], [311, 411, 511]]) assert np.all(z == (y + 1000)) def test_scalar_parameters_3d_array_input(self): """ Scalar parameters should broadcast with an array input to result in an array output of the same shape as the input. """ t = TModel_1_2(1, 10, 1000) y, z = t(np.arange(12).reshape(2, 3, 2) * 100) assert isinstance(y, np.ndarray) assert isinstance(z, np.ndarray) assert np.shape(y) == np.shape(z) == (2, 3, 2) assert np.all(y == [[[11, 111], [211, 311], [411, 511]], [[611, 711], [811, 911], [1011, 1111]]]) assert np.all(z == (y + 1000)) def test_1d_array_parameters_scalar_input(self): """ Array parameters should all be broadcastable with each other, and with a scalar input the output should be broadcast to the maximum dimensions of the parameters. """ t = TModel_1_2([1, 2], [10, 20], [1000, 2000]) y, z = t(100) assert isinstance(y, np.ndarray) assert isinstance(z, np.ndarray) assert np.shape(y) == np.shape(z) == (2,) assert np.all(y == [111, 122]) assert np.all(z == [1111, 2122]) def test_1d_array_parameters_1d_array_input(self): """ When given an array input it must be broadcastable with all the parameters. """ t = TModel_1_2([1, 2], [10, 20], [1000, 2000]) y1, z1 = t([100, 200]) assert np.shape(y1) == np.shape(z1) == (2,) assert np.all(y1 == [111, 222]) assert np.all(z1 == [1111, 2222]) y2, z2 = t([[100], [200]]) assert np.shape(y2) == np.shape(z2) == (2, 2) assert np.all(y2 == [[111, 122], [211, 222]]) assert np.all(z2 == [[1111, 2122], [1211, 2222]]) with pytest.raises(ValueError): # Doesn't broadcast y3, z3 = t([100, 200, 300]) def test_2d_array_parameters_2d_array_input(self): """ When given an array input it must be broadcastable with all the parameters. 
""" t = TModel_1_2([[1, 2], [3, 4]], [[10, 20], [30, 40]], [[1000, 2000], [3000, 4000]]) y1, z1 = t([[100, 200], [300, 400]]) assert np.shape(y1) == np.shape(z1) == (2, 2) assert np.all(y1 == [[111, 222], [333, 444]]) assert np.all(z1 == [[1111, 2222], [3333, 4444]]) y2, z2 = t([[[[100]], [[200]]], [[[300]], [[400]]]]) assert np.shape(y2) == np.shape(z2) == (2, 2, 2, 2) assert np.all(y2 == [[[[111, 122], [133, 144]], [[211, 222], [233, 244]]], [[[311, 322], [333, 344]], [[411, 422], [433, 444]]]]) assert np.all(z2 == [[[[1111, 2122], [3133, 4144]], [[1211, 2222], [3233, 4244]]], [[[1311, 2322], [3333, 4344]], [[1411, 2422], [3433, 4444]]]]) with pytest.raises(ValueError): # Doesn't broadcast y3, z3 = t([[100, 200, 300], [400, 500, 600]]) def test_mixed_array_parameters_1d_array_input(self): """ When given an array input it must be broadcastable with all the parameters. """ t = TModel_1_2([[[0.01, 0.02, 0.03], [0.04, 0.05, 0.06]], [[0.07, 0.08, 0.09], [0.10, 0.11, 0.12]]], [1, 2, 3], [100, 200, 300]) y1, z1 = t([10, 20, 30]) assert np.shape(y1) == np.shape(z1) == (2, 2, 3) assert_allclose(y1, [[[11.01, 22.02, 33.03], [11.04, 22.05, 33.06]], [[11.07, 22.08, 33.09], [11.10, 22.11, 33.12]]]) assert_allclose(z1, [[[111.01, 222.02, 333.03], [111.04, 222.05, 333.06]], [[111.07, 222.08, 333.09], [111.10, 222.11, 333.12]]]) y2, z2 = t([[[[10]]], [[[20]]], [[[30]]]]) assert np.shape(y2) == np.shape(z2) == (3, 2, 2, 3) assert_allclose(y2, [[[[11.01, 12.02, 13.03], [11.04, 12.05, 13.06]], [[11.07, 12.08, 13.09], [11.10, 12.11, 13.12]]], [[[21.01, 22.02, 23.03], [21.04, 22.05, 23.06]], [[21.07, 22.08, 23.09], [21.10, 22.11, 23.12]]], [[[31.01, 32.02, 33.03], [31.04, 32.05, 33.06]], [[31.07, 32.08, 33.09], [31.10, 32.11, 33.12]]]]) assert_allclose(z2, [[[[111.01, 212.02, 313.03], [111.04, 212.05, 313.06]], [[111.07, 212.08, 313.09], [111.10, 212.11, 313.12]]], [[[121.01, 222.02, 323.03], [121.04, 222.05, 323.06]], [[121.07, 222.08, 323.09], [121.10, 222.11, 323.12]]], [[[131.01, 232.02, 333.03], [131.04, 232.05, 333.06]], [[131.07, 232.08, 333.09], [131.10, 232.11, 333.12]]]]) class TInputFormatter(Model): """ A toy model to test input/output formatting. """ inputs = ('x', 'y') outputs = ('x', 'y') @staticmethod def evaluate(x, y): return x, y def test_format_input_scalars(): model = TInputFormatter() result = model(1, 2) assert result == (1, 2) def test_format_input_arrays(): model = TInputFormatter() result = model([1, 1], [2, 2]) assert_allclose(result, (np.array([1, 1]), np.array([2, 2]))) def test_format_input_arrays_transposed(): model = TInputFormatter() input = np.array([[1, 1]]).T, np.array([[2, 2]]).T result = model(*input) assert_allclose(result, input)
# Licensed under a 3-clause BSD style license - see LICENSE.rst

"""
Tests that relate to using quantities/units on parameters of models.
"""

import numpy as np
import pytest

from ..core import Model, Fittable1DModel, InputParameterError
from ..parameters import Parameter, ParameterDefinitionError
from ..models import (Gaussian1D, Pix2Sky_TAN, RotateNative2Celestial,
                      Rotation2D)
from ... import units as u
from ...units import UnitsError
from ...tests.helper import assert_quantity_allclose
from ... import coordinates as coord


class BaseTestModel(Fittable1DModel):
    @staticmethod
    def evaluate(x, a):
        return x


def test_parameter_quantity():
    """
    Basic tests for initializing general models (that do not require units)
    with parameters that have units attached.
    """
    g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
    assert g.amplitude.value == 1.0
    assert g.amplitude.unit is u.J
    assert g.mean.value == 1.0
    assert g.mean.unit is u.m
    assert g.stddev.value == 0.1
    assert g.stddev.unit is u.m


def test_parameter_set_quantity():
    """
    Make sure that parameters that start off as quantities can be set to any
    other quantity, regardless of whether the units of the new quantity are
    compatible with the original ones.  We basically leave it up to the
    evaluate method to raise errors if there are issues with incompatible
    units, and we don't check for consistency at the parameter level.
    """
    g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)

    # Try equivalent units
    g.amplitude = 4 * u.kJ
    assert_quantity_allclose(g.amplitude, 4 * u.kJ)

    g.mean = 3 * u.km
    assert_quantity_allclose(g.mean, 3 * u.km)

    g.stddev = 2 * u.mm
    assert_quantity_allclose(g.stddev, 2 * u.mm)

    # Try different units
    g.amplitude = 2 * u.s
    assert_quantity_allclose(g.amplitude, 2 * u.s)

    g.mean = 2 * u.Jy
    assert_quantity_allclose(g.mean, 2 * u.Jy)


def test_parameter_lose_units():
    """
    Check that parameters that have been set to a quantity raise an exception
    when subsequently set to a value with no units.  We do this because
    setting a parameter to a value with no units is ambiguous if units were
    set before: if a parameter is 1 * u.Jy and the parameter is then set to 2,
    does this mean 2 without units, or 2 * u.Jy?
    """
    g = Gaussian1D(1 * u.Jy, 3, 0.1)

    with pytest.raises(UnitsError) as exc:
        g.amplitude = 2
    assert exc.value.args[0] == ("The 'amplitude' parameter should be given as "
                                 "a Quantity because it was originally "
                                 "initialized as a Quantity")


def test_parameter_add_units():
    """
    On the other hand, if starting from a parameter with no units, we should
    be able to add units since this is unambiguous.
    """
    g = Gaussian1D(1, 3, 0.1)

    g.amplitude = 2 * u.Jy
    assert_quantity_allclose(g.amplitude, 2 * u.Jy)


def test_parameter_change_unit():
    """
    Test that changing the unit on a parameter does not work.  This is an
    ambiguous operation because it's not clear if it means that the value
    should be converted or if the unit should be changed without conversion.
    """
    g = Gaussian1D(1, 1 * u.m, 0.1 * u.m)

    # Setting a unit on a unitless parameter should not work
    with pytest.raises(ValueError) as exc:
        g.amplitude.unit = u.Jy
    assert exc.value.args[0] == ("Cannot attach units to parameters that were "
                                 "not initially specified with units")

    # But changing to another unit should not work either, even if it is an
    # equivalent unit
    with pytest.raises(ValueError) as exc:
        g.mean.unit = u.cm
    assert exc.value.args[0] == ("Cannot change the unit attribute directly, "
                                 "instead change the parameter to a new quantity")
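
# Illustrative sketch (hypothetical helper, not one of the tests): since the
# ``unit`` attribute cannot be mutated directly, the unambiguous way to
# "convert" a parameter is to assign a new, equivalent Quantity.
def _convert_parameter_unit_example():
    g = Gaussian1D(1, 1 * u.m, 0.1 * u.m)
    g.mean = g.mean.quantity.to(u.cm)  # assign a converted Quantity instead
    assert g.mean.unit is u.cm
    assert g.mean.value == 100.0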
""" g = Gaussian1D(1 * u.Jy, 1 * u.m, 0.1 * u.m) # To set a parameter to a quantity, we simply do g.amplitude = 2 * u.Jy # If we try setting the value, we need to pass a non-quantity value # TODO: determine whether this is the desired behavior? g.amplitude.value = 4 assert_quantity_allclose(g.amplitude, 4 * u.Jy) assert g.amplitude.value == 4 assert g.amplitude.unit is u.Jy # If we try setting it to a Quantity, we raise an error with pytest.raises(TypeError) as exc: g.amplitude.value = 3 * u.Jy assert exc.value.args[0] == ("The .value property on parameters should be set to " "unitless values, not Quantity objects. To set a " "parameter to a quantity simply set the parameter " "directly without using .value") def test_parameter_quantity_property(): """ Test that the quantity property of Parameters behaves as expected """ # Since parameters have a .value and .unit parameter that return just the # value and unit respectively, we also have a .quantity parameter that # returns a Quantity instance. g = Gaussian1D(1 * u.Jy, 1 * u.m, 0.1 * u.m) assert_quantity_allclose(g.amplitude.quantity, 1 * u.Jy) # Setting a parameter to a quantity changes the value and the default unit g.amplitude.quantity = 5 * u.mJy assert g.amplitude.value == 5 assert g.amplitude.unit is u.mJy # And we can also set the parameter to a value with different units g.amplitude.quantity = 4 * u.s assert g.amplitude.value == 4 assert g.amplitude.unit is u.s # But not to a value without units with pytest.raises(TypeError) as exc: g.amplitude.quantity = 3 assert exc.value.args[0] == "The .quantity attribute should be set to a Quantity object" def test_parameter_default_units_match(): # If the unit and default quantity units are different, raise an error with pytest.raises(ParameterDefinitionError) as exc: class TestC(Fittable1DModel): a = Parameter(default=1.0 * u.m, unit=u.Jy) assert exc.value.args[0] == ("parameter default 1.0 m does not have units " "equivalent to the required unit Jy") @pytest.mark.parametrize(('unit', 'default'), ((u.m, 1.0), (None, 1 * u.m))) def test_parameter_defaults(unit, default): """ Test that default quantities are correctly taken into account """ class TestModel(BaseTestModel): a = Parameter(default=default, unit=unit) # TODO: decide whether the default property should return a value or # a quantity? # The default unit and value should be set on the class assert TestModel.a.unit == u.m assert TestModel.a.default == 1.0 # Check that the default unit and value are also set on a class instance m = TestModel() assert m.a.unit == u.m assert m.a.default == m.a.value == 1.0 # If the parameter is set to a different value, the default is still the # internal default m = TestModel(2.0 * u.m) assert m.a.unit == u.m assert m.a.value == 2.0 assert m.a.default == 1.0 # Instantiate with a different, but compatible unit m = TestModel(2.0 * u.pc) assert m.a.unit == u.pc assert m.a.value == 2.0 # The default is still in the original units # TODO: but how do we know what those units are if we don't return a # quantity? 
def test_parameter_default_units_match():

    # If the unit and default quantity units are different, raise an error
    with pytest.raises(ParameterDefinitionError) as exc:
        class TestC(Fittable1DModel):
            a = Parameter(default=1.0 * u.m, unit=u.Jy)
    assert exc.value.args[0] == ("parameter default 1.0 m does not have units "
                                 "equivalent to the required unit Jy")


@pytest.mark.parametrize(('unit', 'default'), ((u.m, 1.0), (None, 1 * u.m)))
def test_parameter_defaults(unit, default):
    """
    Test that default quantities are correctly taken into account
    """

    class TestModel(BaseTestModel):
        a = Parameter(default=default, unit=unit)

    # TODO: decide whether the default property should return a value or
    # a quantity?

    # The default unit and value should be set on the class
    assert TestModel.a.unit == u.m
    assert TestModel.a.default == 1.0

    # Check that the default unit and value are also set on a class instance
    m = TestModel()
    assert m.a.unit == u.m
    assert m.a.default == m.a.value == 1.0

    # If the parameter is set to a different value, the default is still the
    # internal default
    m = TestModel(2.0 * u.m)
    assert m.a.unit == u.m
    assert m.a.value == 2.0
    assert m.a.default == 1.0

    # Instantiate with a different, but compatible unit
    m = TestModel(2.0 * u.pc)
    assert m.a.unit == u.pc
    assert m.a.value == 2.0
    # The default is still in the original units
    # TODO: but how do we know what those units are if we don't return a
    # quantity?
    assert m.a.default == 1.0

    # Initialize with a completely different unit
    m = TestModel(2.0 * u.Jy)
    assert m.a.unit == u.Jy
    assert m.a.value == 2.0
    # TODO: this illustrates why the default doesn't make sense anymore
    assert m.a.default == 1.0

    # Instantiating with no units at all is not allowed, since the parameter
    # requires a quantity
    with pytest.raises(InputParameterError) as exc:
        TestModel(1.0)
    assert exc.value.args[0] == ("TestModel.__init__() requires a "
                                 "Quantity for parameter 'a'")


def test_parameter_quantity_arithmetic():
    """
    Test that arithmetic operations with properties that have units return the
    appropriate Quantities.
    """

    g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)

    # Addition should work if units are compatible
    assert g.mean + (1 * u.m) == 2 * u.m
    assert (1 * u.m) + g.mean == 2 * u.m

    # Multiplication by a scalar should also preserve the quantity-ness
    assert g.mean * 2 == (2 * u.m)
    assert 2 * g.mean == (2 * u.m)

    # Multiplication by a quantity should result in units being multiplied
    assert g.mean * (2 * u.m) == (2 * (u.m ** 2))
    assert (2 * u.m) * g.mean == (2 * (u.m ** 2))

    # Negation should work properly too
    assert -g.mean == (-1 * u.m)
    assert abs(-g.mean) == g.mean

    # However, addition of a quantity + scalar should not work
    with pytest.raises(UnitsError) as exc:
        g.mean + 1
    assert exc.value.args[0] == ("Can only apply 'add' function to "
                                 "dimensionless quantities when other argument "
                                 "is not a quantity (unless the latter is all "
                                 "zero/infinity/nan)")

    with pytest.raises(UnitsError) as exc:
        1 + g.mean
    assert exc.value.args[0] == ("Can only apply 'add' function to "
                                 "dimensionless quantities when other argument "
                                 "is not a quantity (unless the latter is all "
                                 "zero/infinity/nan)")


def test_parameter_quantity_comparison():
    """
    Basic test of comparison operations on properties with units.
    """

    g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)

    # Essentially here we are checking that parameters behave like Quantity
    assert g.mean == 1 * u.m
    assert 1 * u.m == g.mean
    assert g.mean != 1
    assert 1 != g.mean
    assert g.mean < 2 * u.m
    assert 2 * u.m > g.mean

    with pytest.raises(UnitsError) as exc:
        g.mean < 2
    assert exc.value.args[0] == ("Can only apply 'less' function to "
                                 "dimensionless quantities when other argument "
                                 "is not a quantity (unless the latter is all "
                                 "zero/infinity/nan)")

    with pytest.raises(UnitsError) as exc:
        2 > g.mean
    assert exc.value.args[0] == ("Can only apply 'less' function to "
                                 "dimensionless quantities when other argument "
                                 "is not a quantity (unless the latter is all "
                                 "zero/infinity/nan)")

    g = Gaussian1D([1, 2] * u.J, [1, 2] * u.m, [0.1, 0.2] * u.m)

    assert np.all(g.mean == [1, 2] * u.m)
    assert np.all([1, 2] * u.m == g.mean)
    assert np.all(g.mean != [1, 2])
    assert np.all([1, 2] != g.mean)

    with pytest.raises(UnitsError) as exc:
        g.mean < [3, 4]
    assert exc.value.args[0] == ("Can only apply 'less' function to "
                                 "dimensionless quantities when other argument "
                                 "is not a quantity (unless the latter is all "
                                 "zero/infinity/nan)")

    with pytest.raises(UnitsError) as exc:
        [3, 4] > g.mean
    assert exc.value.args[0] == ("Can only apply 'less' function to "
                                 "dimensionless quantities when other argument "
                                 "is not a quantity (unless the latter is all "
                                 "zero/infinity/nan)")


def test_parameters_compound_models():
    tan = Pix2Sky_TAN()
    sky_coords = coord.SkyCoord(ra=5.6, dec=-72, unit=u.deg)
    lon_pole = 180 * u.deg
    n2c = RotateNative2Celestial(sky_coords.ra, sky_coords.dec, lon_pole)
    rot = Rotation2D(23)
    m = rot | n2c
# Licensed under a 3-clause BSD style license - see LICENSE.rst

"""Tests for polynomial models."""

import os
from itertools import product

import pytest
import numpy as np
from numpy.testing.utils import assert_allclose

from .. import fitting
from ... import wcs
from ...io import fits
from ..polynomial import (Chebyshev1D, Hermite1D, Legendre1D, Polynomial1D,
                          Chebyshev2D, Hermite2D, Legendre2D, Polynomial2D,
                          SIP, PolynomialBase, OrthoPolynomialBase)
from ..functional_models import Linear1D
from ..mappings import Identity
from ...utils.data import get_pkg_data_filename

try:
    from scipy import optimize  # pylint: disable=W0611
    HAS_SCIPY = True
except ImportError:
    HAS_SCIPY = False


linear1d = {
    Chebyshev1D: {
        'args': (3,),
        'kwargs': {'domain': [1, 10]},
        'parameters': {'c0': 1.2, 'c1': 2, 'c2': 2.3, 'c3': 0.2},
        'constraints': {'fixed': {'c0': 1.2}}
    },
    Hermite1D: {
        'args': (3,),
        'kwargs': {'domain': [1, 10]},
        'parameters': {'c0': 1.2, 'c1': 2, 'c2': 2.3, 'c3': 0.2},
        'constraints': {'fixed': {'c0': 1.2}}
    },
    Legendre1D: {
        'args': (3,),
        'kwargs': {'domain': [1, 10]},
        'parameters': {'c0': 1.2, 'c1': 2, 'c2': 2.3, 'c3': 0.2},
        'constraints': {'fixed': {'c0': 1.2}}
    },
    Polynomial1D: {
        'args': (3,),
        'kwargs': {'domain': [1, 10]},
        'parameters': {'c0': 1.2, 'c1': 2, 'c2': 2.3, 'c3': 0.2},
        'constraints': {'fixed': {'c0': 1.2}}
    },
    Linear1D: {
        'args': (),
        'kwargs': {},
        'parameters': {'intercept': 1.2, 'slope': 23.1},
        'constraints': {'fixed': {'intercept': 1.2}}
    }
}


linear2d = {
    Chebyshev2D: {
        'args': (1, 1),
        'kwargs': {'x_domain': [0, 99], 'y_domain': [0, 82]},
        'parameters': {'c0_0': 1.2, 'c1_0': 2, 'c0_1': 2.3, 'c1_1': 0.2},
        'constraints': {'fixed': {'c0_0': 1.2}}
    },
    Hermite2D: {
        'args': (1, 1),
        'kwargs': {'x_domain': [0, 99], 'y_domain': [0, 82]},
        'parameters': {'c0_0': 1.2, 'c1_0': 2, 'c0_1': 2.3, 'c1_1': 0.2},
        'constraints': {'fixed': {'c0_0': 1.2}}
    },
    Legendre2D: {
        'args': (1, 1),
        'kwargs': {'x_domain': [0, 99], 'y_domain': [0, 82]},
        'parameters': {'c0_0': 1.2, 'c1_0': 2, 'c0_1': 2.3, 'c1_1': 0.2},
        'constraints': {'fixed': {'c0_0': 1.2}}
    },
    Polynomial2D: {
        'args': (1,),
        'kwargs': {},
        'parameters': {'c0_0': 1.2, 'c1_0': 2, 'c0_1': 2.3},
        'constraints': {'fixed': {'c0_0': 1.2}}
    }
}
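
# Illustrative sketch (hypothetical helper, not one of the tests below): each
# entry above is unpacked into a model constructor, with 'parameters' (and,
# for the constraints tests, 'constraints') merged into the keyword arguments.
def _build_model_from_spec_example():
    spec = linear1d[Polynomial1D]
    kwargs = dict(spec['kwargs'])
    kwargs.update(spec['parameters'])
    model = Polynomial1D(*spec['args'], **kwargs)
    assert model.c0 == 1.2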
@pytest.mark.skipif('not HAS_SCIPY')
class TestFitting:
    """Test linear fitter with polynomial models."""

    def setup_class(self):
        self.N = 100
        self.M = 100
        self.x1 = np.linspace(1, 10, 100)
        self.y2, self.x2 = np.mgrid[:100, :83]

        rsn = np.random.RandomState(0)
        self.n1 = rsn.randn(self.x1.size) * .1
        self.n2 = rsn.randn(self.x2.size)
        self.n2.shape = self.x2.shape

        self.linear_fitter = fitting.LinearLSQFitter()
        self.non_linear_fitter = fitting.LevMarLSQFitter()

    # TODO: Most of these test cases have some pretty repetitive setup that we
    # could probably factor out

    @pytest.mark.parametrize(('model_class', 'constraints'),
                             list(product(sorted(linear1d, key=str),
                                          (False, True))))
    def test_linear_fitter_1D(self, model_class, constraints):
        """Test fitting with LinearLSQFitter"""

        model_args = linear1d[model_class]
        kwargs = {}
        kwargs.update(model_args['kwargs'])
        kwargs.update(model_args['parameters'])

        if constraints:
            kwargs.update(model_args['constraints'])

        model = model_class(*model_args['args'], **kwargs)

        y1 = model(self.x1)
        model_lin = self.linear_fitter(model, self.x1, y1 + self.n1)

        if constraints:
            # For the constraints tests we're not checking the overall fit,
            # just that the constraint was maintained
            fixed = model_args['constraints'].get('fixed', None)
            if fixed:
                for param, value in fixed.items():
                    expected = model_args['parameters'][param]
                    assert getattr(model_lin, param).value == expected
        else:
            assert_allclose(model_lin.parameters, model.parameters,
                            atol=0.2)

    @pytest.mark.parametrize(('model_class', 'constraints'),
                             list(product(sorted(linear1d, key=str),
                                          (False, True))))
    def test_non_linear_fitter_1D(self, model_class, constraints):
        """Test fitting with non-linear LevMarLSQFitter"""

        model_args = linear1d[model_class]
        kwargs = {}
        kwargs.update(model_args['kwargs'])
        kwargs.update(model_args['parameters'])

        if constraints:
            kwargs.update(model_args['constraints'])

        model = model_class(*model_args['args'], **kwargs)

        y1 = model(self.x1)
        model_nlin = self.non_linear_fitter(model, self.x1, y1 + self.n1)

        if constraints:
            fixed = model_args['constraints'].get('fixed', None)
            if fixed:
                for param, value in fixed.items():
                    expected = model_args['parameters'][param]
                    assert getattr(model_nlin, param).value == expected
        else:
            assert_allclose(model_nlin.parameters, model.parameters,
                            atol=0.2)

    @pytest.mark.parametrize(('model_class', 'constraints'),
                             list(product(sorted(linear2d, key=str),
                                          (False, True))))
    def test_linear_fitter_2D(self, model_class, constraints):
        """Test fitting with LinearLSQFitter"""

        model_args = linear2d[model_class]
        kwargs = {}
        kwargs.update(model_args['kwargs'])
        kwargs.update(model_args['parameters'])

        if constraints:
            kwargs.update(model_args['constraints'])

        model = model_class(*model_args['args'], **kwargs)

        z = model(self.x2, self.y2)
        model_lin = self.linear_fitter(model, self.x2, self.y2, z + self.n2)

        if constraints:
            fixed = model_args['constraints'].get('fixed', None)
            if fixed:
                for param, value in fixed.items():
                    expected = model_args['parameters'][param]
                    assert getattr(model_lin, param).value == expected
        else:
            assert_allclose(model_lin.parameters, model.parameters,
                            atol=0.2)

    @pytest.mark.parametrize(('model_class', 'constraints'),
                             list(product(sorted(linear2d, key=str),
                                          (False, True))))
    def test_non_linear_fitter_2D(self, model_class, constraints):
        """Test fitting with non-linear LevMarLSQFitter"""

        model_args = linear2d[model_class]
        kwargs = {}
        kwargs.update(model_args['kwargs'])
        kwargs.update(model_args['parameters'])

        if constraints:
            kwargs.update(model_args['constraints'])

        model = model_class(*model_args['args'], **kwargs)

        z = model(self.x2, self.y2)
        model_nlin = self.non_linear_fitter(model, self.x2, self.y2,
                                            z + self.n2)

        if constraints:
            fixed = model_args['constraints'].get('fixed', None)
            if fixed:
                for param, value in fixed.items():
                    expected = model_args['parameters'][param]
                    assert getattr(model_nlin, param).value == expected
        else:
            assert_allclose(model_nlin.parameters, model.parameters,
                            atol=0.2)
@pytest.mark.parametrize('model_class',
                         [cls for cls in list(linear1d) + list(linear2d)
                          if issubclass(cls, PolynomialBase)])
def test_polynomial_init_with_constraints(model_class):
    """
    Test that polynomial models can be instantiated with constraints, but no
    parameters specified.

    Regression test for https://github.com/astropy/astropy/issues/3606
    """

    # Just determine which parameter to place a constraint on; it doesn't
    # matter which parameter it is to exhibit the problem so long as it's a
    # valid parameter for the model
    if '1D' in model_class.__name__:
        param = 'c0'
    else:
        param = 'c0_0'

    if issubclass(model_class, OrthoPolynomialBase):
        degree = (2, 2)
    else:
        degree = (2,)

    m = model_class(*degree, fixed={param: True})

    assert m.fixed[param] is True
    assert getattr(m, param).fixed is True


def test_sip_hst():
    """Test SIP against astropy.wcs"""

    test_file = get_pkg_data_filename(os.path.join('data', 'hst_sip.hdr'))
    hdr = fits.Header.fromtextfile(test_file)
    crpix1 = hdr['CRPIX1']
    crpix2 = hdr['CRPIX2']
    wobj = wcs.WCS(hdr)
    a_pars = dict(**hdr['A_*'])
    b_pars = dict(**hdr['B_*'])
    a_order = a_pars.pop('A_ORDER')
    b_order = b_pars.pop('B_ORDER')
    sip = SIP([crpix1, crpix2], a_order, b_order, a_pars, b_pars)
    coords = [1, 1]
    rel_coords = [1 - crpix1, 1 - crpix2]
    astwcs_result = wobj.sip_pix2foc([coords], 1)[0] - rel_coords
    assert_allclose(sip(1, 1), astwcs_result)


def test_sip_irac():
    """Test forward and inverse SIP against astropy.wcs"""

    test_file = get_pkg_data_filename(os.path.join('data', 'irac_sip.hdr'))
    hdr = fits.Header.fromtextfile(test_file)
    crpix1 = hdr['CRPIX1']
    crpix2 = hdr['CRPIX2']
    wobj = wcs.WCS(hdr)
    a_pars = dict(**hdr['A_*'])
    b_pars = dict(**hdr['B_*'])
    ap_pars = dict(**hdr['AP_*'])
    bp_pars = dict(**hdr['BP_*'])
    a_order = a_pars.pop('A_ORDER')
    b_order = b_pars.pop('B_ORDER')
    ap_order = ap_pars.pop('AP_ORDER')
    bp_order = bp_pars.pop('BP_ORDER')
    del a_pars['A_DMAX']
    del b_pars['B_DMAX']
    pix = [200, 200]
    rel_pix = [200 - crpix1, 200 - crpix2]
    sip = SIP([crpix1, crpix2], a_order, b_order, a_pars, b_pars,
              ap_order=ap_order, ap_coeff=ap_pars,
              bp_order=bp_order, bp_coeff=bp_pars)

    foc = wobj.sip_pix2foc([pix], 1)
    newpix = wobj.sip_foc2pix(foc, 1)[0]
    assert_allclose(sip(*pix), foc[0] - rel_pix)
    assert_allclose(sip.inverse(*foc[0]) + foc[0] - rel_pix,
                    newpix - pix)


def test_sip_no_coeff():
    sip = SIP([10, 12], 2, 2)
    assert_allclose(sip.sip1d_a.parameters, [0., 0., 0])
    assert_allclose(sip.sip1d_b.parameters, [0., 0., 0])
    with pytest.raises(NotImplementedError):
        sip.inverse
@pytest.mark.parametrize('cls', (Polynomial1D, Chebyshev1D, Legendre1D,
                                 Polynomial2D, Chebyshev2D, Legendre2D))
def test_zero_degree_polynomial(cls):
    """
    A few tests that degree=0 polynomials are correctly evaluated and fitted.

    Regression test for https://github.com/astropy/astropy/pull/3589
    """

    if cls.n_inputs == 1:  # Test 1D polynomials
        p1 = cls(degree=0, c0=1)
        assert p1(0) == 1
        assert np.all(p1(np.zeros(5)) == np.ones(5))

        x = np.linspace(0, 1, 100)
        # Add a little noise along a straight line
        y = 1 + np.random.uniform(0, 0.1, len(x))

        p1_init = cls(degree=0)
        fitter = fitting.LinearLSQFitter()
        p1_fit = fitter(p1_init, x, y)

        # The fit won't be exact of course, but it should get close to within
        # 10% (atol=0.10 on a true value of 1)
        assert_allclose(p1_fit.c0, 1, atol=0.10)
    elif cls.n_inputs == 2:  # Test 2D polynomials
        if issubclass(cls, OrthoPolynomialBase):
            p2 = cls(x_degree=0, y_degree=0, c0_0=1)
        else:
            p2 = cls(degree=0, c0_0=1)
        assert p2(0, 0) == 1
        assert np.all(p2(np.zeros(5), np.zeros(5)) == np.ones(5))

        y, x = np.mgrid[0:1:100j, 0:1:100j]
        z = (1 + np.random.uniform(0, 0.1, x.size)).reshape(100, 100)

        if issubclass(cls, OrthoPolynomialBase):
            p2_init = cls(x_degree=0, y_degree=0)
        else:
            p2_init = cls(degree=0)
        fitter = fitting.LinearLSQFitter()
        p2_fit = fitter(p2_init, x, y, z)

        assert_allclose(p2_fit.c0_0, 1, atol=0.10)


@pytest.mark.skipif('not HAS_SCIPY')
def test_2d_orthopolynomial_in_compound_model():
    """
    Ensure that OrthoPolynomialBase (ie. Chebyshev2D & Legendre2D) models get
    evaluated & fitted correctly when part of a compound model.

    Regression test for https://github.com/astropy/astropy/pull/6085.
    """

    y, x = np.mgrid[0:5, 0:5]
    z = x + y

    fitter = fitting.LevMarLSQFitter()
    simple_model = Chebyshev2D(2, 2)
    simple_fit = fitter(simple_model, x, y, z)

    fitter = fitting.LevMarLSQFitter()  # re-init to compare like with like
    compound_model = Identity(2) | Chebyshev2D(2, 2)
    compound_fit = fitter(compound_model, x, y, z)

    assert_allclose(simple_fit(x, y), compound_fit(x, y), atol=1e-15)
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import inspect
from copy import deepcopy
import pickle

import pytest
import numpy as np
from numpy.testing.utils import assert_allclose, assert_array_equal

from ..core import Model, ModelDefinitionError
from ..parameters import Parameter
from ..models import (Const1D, Shift, Scale, Rotation2D, Gaussian1D,
                      Gaussian2D, Polynomial1D, Polynomial2D, Chebyshev2D,
                      Legendre2D, Chebyshev1D, Legendre1D,
                      AffineTransformation2D, Identity, Mapping)


@pytest.mark.parametrize(('expr', 'result'),
                         [(lambda x, y: x + y, 5.0),
                          (lambda x, y: x - y, -1.0),
                          (lambda x, y: x * y, 6.0),
                          (lambda x, y: x / y, 2.0 / 3.0),
                          (lambda x, y: x ** y, 8.0)])
def test_two_model_class_arithmetic_1d(expr, result):
    # Const1D is perhaps the simplest model to test basic arithmetic with.
    # TODO: Should define more tests later on for more complicated
    # combinations of models

    S = expr(Const1D, Const1D)

    assert issubclass(S, Model)
    assert S.n_inputs == 1
    assert S.n_outputs == 1

    # Initialize an instance of the model, providing values for the two
    # "amplitude" parameters
    s = S(2, 3)

    # It shouldn't matter what input we evaluate on since this is a constant
    # function
    out = s(0)
    assert out == result
    assert isinstance(out, float)


@pytest.mark.parametrize(('expr', 'result'),
                         [(lambda x, y: x + y, [5.0, 5.0]),
                          (lambda x, y: x - y, [-1.0, -1.0]),
                          (lambda x, y: x * y, [6.0, 6.0]),
                          (lambda x, y: x / y, [2.0 / 3.0, 2.0 / 3.0]),
                          (lambda x, y: x ** y, [8.0, 8.0])])
def test_model_set(expr, result):
    s = expr(Const1D((2, 2), n_models=2), Const1D((3, 3), n_models=2))
    out = s(0, model_set_axis=False)
    assert_array_equal(out, result)


@pytest.mark.parametrize(('expr', 'result'),
                         [(lambda x, y: x + y, [5.0, 5.0]),
                          (lambda x, y: x - y, [-1.0, -1.0]),
                          (lambda x, y: x * y, [6.0, 6.0]),
                          (lambda x, y: x / y, [2.0 / 3.0, 2.0 / 3.0]),
                          (lambda x, y: x ** y, [8.0, 8.0])])
def test_model_set_raises_value_error(expr, result):
    """
    Check that creating model sets with components whose _n_models are
    different raises a ValueError.
    """
    with pytest.raises(ValueError):
        s = expr(Const1D((2, 2), n_models=2), Const1D(3, n_models=1))


@pytest.mark.parametrize(('expr', 'result'),
                         [(lambda x, y: x + y, 5.0),
                          (lambda x, y: x - y, -1.0),
                          (lambda x, y: x * y, 6.0),
                          (lambda x, y: x / y, 2.0 / 3.0),
                          (lambda x, y: x ** y, 8.0)])
def test_two_model_instance_arithmetic_1d(expr, result):
    """
    Like test_two_model_class_arithmetic_1d, but creates a new model from two
    model *instances* with fixed parameters.
    """
    s = expr(Const1D(2), Const1D(3))

    assert isinstance(s, Model)
    assert s.n_inputs == 1
    assert s.n_outputs == 1

    out = s(0)
    assert out == result
    assert isinstance(out, float)
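
# Illustrative sketch (hypothetical helper, not one of the tests): combining
# model *classes* yields a new class whose parameters are supplied later,
# while combining *instances* bakes the parameter values in immediately.
def _class_vs_instance_arithmetic_example():
    SumClass = Const1D + Const1D      # parameters still free
    assert SumClass(2, 3)(0) == 5.0
    summed = Const1D(2) + Const1D(3)  # parameters fixed at 2 and 3
    assert summed(0) == 5.0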
""" S1 = expr(Const1D, Const1D(3)) S2 = expr(Const1D(2), Const1D) for cls in (S1, S2): assert issubclass(cls, Model) assert cls.n_inputs == 1 assert cls.n_outputs == 1 # Requires values for both amplitudes even though one of them them has a # default # TODO: We may wish to fix that eventually, so that if a parameter has a # default it doesn't *have* to be given in the init s1 = S1(2, 3) s2 = S2(2, 3) for out in (s1(0), s2(0)): assert out == result assert isinstance(out, float) def test_simple_two_model_class_compose_1d(): """ Shift and Scale are two of the simplest models to test model composition with. """ S1 = Shift | Scale # First shift then scale assert issubclass(S1, Model) assert S1.n_inputs == 1 assert S1.n_outputs == 1 s1 = S1(2, 3) # Shift by 2 and scale by 3 assert s1(1) == 9.0 S2 = Scale | Shift # First scale then shift assert issubclass(S2, Model) assert S2.n_inputs == 1 assert S2.n_outputs == 1 s2 = S2(2, 3) # Scale by 2 then shift by 3 assert s2(1) == 5.0 # Test with array inputs assert_array_equal(s2([1, 2, 3]), [5.0, 7.0, 9.0]) def test_simple_two_model_class_compose_2d(): """ A simple example consisting of two rotations. """ R = Rotation2D | Rotation2D assert issubclass(R, Model) assert R.n_inputs == 2 assert R.n_outputs == 2 r1 = R(45, 45) # Rotate twice by 45 degrees assert_allclose(r1(0, 1), (-1, 0), atol=1e-10) r2 = R(90, 90) # Rotate twice by 90 degrees assert_allclose(r2(0, 1), (0, -1), atol=1e-10) # Compose R with itself to produce 4 rotations R2 = R | R r3 = R2(45, 45, 45, 45) assert_allclose(r3(0, 1), (0, -1), atol=1e-10) def test_n_submodels(): """ Test that CompoundModel.n_submodels properly returns the number of components. """ g2 = Gaussian1D() + Gaussian1D() assert g2.n_submodels() == 2 g3 = g2 + Gaussian1D() assert g3.n_submodels() == 3 g5 = g3 | g2 assert g5.n_submodels() == 5 g7 = g5 / g2 assert g7.n_submodels() == 7 # make sure it works as class method p = Polynomial1D + Polynomial1D assert p.n_submodels() == 2 def test_expression_formatting(): """ Test that the expression strings from compound models are formatted correctly. 
""" # For the purposes of this test it doesn't matter a great deal what # model(s) are used in the expression, I don't think G = Gaussian1D G2 = Gaussian2D M = G + G assert M._format_expression() == '[0] + [1]' M = G + G + G assert M._format_expression() == '[0] + [1] + [2]' M = G + G * G assert M._format_expression() == '[0] + [1] * [2]' M = G * G + G assert M._format_expression() == '[0] * [1] + [2]' M = G + G * G + G assert M._format_expression() == '[0] + [1] * [2] + [3]' M = (G + G) * (G + G) assert M._format_expression() == '([0] + [1]) * ([2] + [3])' # This example uses parentheses in the expression, but those won't be # preserved in the expression formatting since they technically aren't # necessary, and there's no way to know that they were originally # parenthesized (short of some deep, and probably not worthwhile # introspection) M = (G * G) + (G * G) assert M._format_expression() == '[0] * [1] + [2] * [3]' M = G ** G assert M._format_expression() == '[0] ** [1]' M = G + G ** G assert M._format_expression() == '[0] + [1] ** [2]' M = (G + G) ** G assert M._format_expression() == '([0] + [1]) ** [2]' M = G + G | G assert M._format_expression() == '[0] + [1] | [2]' M = G + (G | G) assert M._format_expression() == '[0] + ([1] | [2])' M = G & G | G2 assert M._format_expression() == '[0] & [1] | [2]' M = G & (G | G) assert M._format_expression() == '[0] & ([1] | [2])' def test_indexing_on_class(): """ Test indexing on compound model class objects, including cases where the submodels are classes, as well as instances, or both. """ g = Gaussian1D(1, 2, 3, name='g') p = Polynomial1D(2, name='p') M = Gaussian1D + Const1D assert M[0] is Gaussian1D assert M[1] is Const1D assert M['Gaussian1D'] is M[0] assert M['Const1D'] is M[1] M = Gaussian1D + p assert M[0] is Gaussian1D assert isinstance(M['p'], Polynomial1D) m = g + p assert isinstance(m[0], Gaussian1D) assert isinstance(m[1], Polynomial1D) assert isinstance(m['g'], Gaussian1D) assert isinstance(m['p'], Polynomial1D) # Test negative indexing assert isinstance(m[-1], Polynomial1D) assert isinstance(m[-2], Gaussian1D) with pytest.raises(IndexError): m[42] with pytest.raises(IndexError): m['foobar'] # TODO: It would be good if there were an easier way to interrogate a compound # model class for what expression it represents. Not sure what that would look # like though. def test_slicing_on_class(): """ Test slicing a simple compound model class using integers. """ A = Const1D.rename('A') B = Const1D.rename('B') C = Const1D.rename('C') D = Const1D.rename('D') E = Const1D.rename('E') F = Const1D.rename('F') M = A + B - C * D / E ** F assert M[0:1] is A # This test will also check that the correct parameter names are generated # for each slice (fairly trivial in this case since all the submodels have # the same parameter, but if any corner cases are found that aren't covered # by this test we can do something different...) assert M[0:1].param_names == ('amplitude',) # This looks goofy but if you slice by name to the sub-model of the same # name it should just return that model, logically. 
def test_slicing_on_class():
    """
    Test slicing a simple compound model class using integers.
    """

    A = Const1D.rename('A')
    B = Const1D.rename('B')
    C = Const1D.rename('C')
    D = Const1D.rename('D')
    E = Const1D.rename('E')
    F = Const1D.rename('F')

    M = A + B - C * D / E ** F

    assert M[0:1] is A
    # This test will also check that the correct parameter names are generated
    # for each slice (fairly trivial in this case since all the submodels have
    # the same parameter, but if any corner cases are found that aren't
    # covered by this test we can do something different...)
    assert M[0:1].param_names == ('amplitude',)

    # This looks goofy, but if you slice by name to the sub-model of the same
    # name it should just return that model, logically.
    assert M['A':'A'] is A
    assert M['A':'A'].param_names == ('amplitude',)
    assert M[5:6] is F
    assert M[5:6].param_names == ('amplitude',)
    assert M['F':'F'] is F
    assert M['F':'F'].param_names == ('amplitude',)

    # 1 + 2
    assert M[:2](1, 2)(0) == 3
    assert M[:2].param_names == ('amplitude_0', 'amplitude_1')
    assert M[:'B'](1, 2)(0) == 3
    assert M[:'B'].param_names == ('amplitude_0', 'amplitude_1')

    # 2 - 3
    assert M[1:3](2, 3)(0) == -1
    assert M[1:3].param_names == ('amplitude_1', 'amplitude_2')
    assert M['B':'C'](2, 3)(0) == -1
    assert M['B':'C'].param_names == ('amplitude_1', 'amplitude_2')

    # 3 * 4
    assert M[2:4](3, 4)(0) == 12
    assert M[2:4].param_names == ('amplitude_2', 'amplitude_3')
    assert M['C':'D'](3, 4)(0) == 12
    assert M['C':'D'].param_names == ('amplitude_2', 'amplitude_3')

    # 4 / 5
    assert M[3:5](4, 5)(0) == 0.8
    assert M[3:5].param_names == ('amplitude_3', 'amplitude_4')
    assert M['D':'E'](4, 5)(0) == 0.8
    assert M['D':'E'].param_names == ('amplitude_3', 'amplitude_4')

    # 5 ** 6
    assert M[4:6](5, 6)(0) == 15625
    assert M[4:6].param_names == ('amplitude_4', 'amplitude_5')
    assert M['E':'F'](5, 6)(0) == 15625
    assert M['E':'F'].param_names == ('amplitude_4', 'amplitude_5')


def test_slicing_on_instance():
    """
    Test slicing a simple compound model instance using integers.
    """

    A = Const1D.rename('A')
    B = Const1D.rename('B')
    C = Const1D.rename('C')
    D = Const1D.rename('D')
    E = Const1D.rename('E')
    F = Const1D.rename('F')

    M = A + B - C * D / E ** F
    m = M(1, 2, 3, 4, 5, 6)

    assert isinstance(m[0:1], A)
    assert isinstance(m['A':'A'], A)
    assert isinstance(m[5:6], F)
    assert isinstance(m['F':'F'], F)

    # 1 + 2
    assert m[:'B'](0) == 3
    assert m[:'B'].param_names == ('amplitude_0', 'amplitude_1')
    assert np.all(m[:'B'].parameters == [1, 2])

    # 2 - 3
    assert m['B':'C'](0) == -1
    assert m['B':'C'].param_names == ('amplitude_1', 'amplitude_2')
    assert np.all(m['B':'C'].parameters == [2, 3])

    # 3 * 4
    assert m['C':'D'](0) == 12
    assert m['C':'D'].param_names == ('amplitude_2', 'amplitude_3')
    assert np.all(m['C':'D'].parameters == [3, 4])

    # 4 / 5
    assert m['D':'E'](0) == 0.8
    assert m['D':'E'].param_names == ('amplitude_3', 'amplitude_4')
    assert np.all(m['D':'E'].parameters == [4, 5])

    # 5 ** 6
    assert m['E':'F'](0) == 15625
    assert m['E':'F'].param_names == ('amplitude_4', 'amplitude_5')
    assert np.all(m['E':'F'].parameters == [5, 6])
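
# Illustrative sketch (hypothetical helper, not one of the tests): unlike
# integer slices, slicing by submodel *name* is inclusive of both endpoints,
# so M['B':'C'] contains the two submodels named 'B' and 'C'.
def _name_slice_inclusive_example():
    M = Const1D.rename('A') + Const1D.rename('B') - Const1D.rename('C')
    assert M['B':'C'].param_names == ('amplitude_1', 'amplitude_2')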
def test_indexing_on_instance():
    """Test indexing on compound model instances."""

    M = Gaussian1D + Const1D
    m = M(1, 0, 0.1, 2)
    assert isinstance(m[0], Gaussian1D)
    assert isinstance(m[1], Const1D)
    assert isinstance(m['Gaussian1D'], Gaussian1D)
    assert isinstance(m['Const1D'], Const1D)

    # Test parameter equivalence
    assert m[0].amplitude == 1 == m.amplitude_0
    assert m[0].mean == 0 == m.mean_0
    assert m[0].stddev == 0.1 == m.stddev_0
    assert m[1].amplitude == 2 == m.amplitude_1

    # Test that parameter value updates are symmetric between the compound
    # model and the submodel returned by indexing
    const = m[1]
    m.amplitude_1 = 42
    assert const.amplitude == 42
    const.amplitude = 137
    assert m.amplitude_1 == 137

    # Similar couple of tests, but now where the compound model was created
    # from model instances
    g = Gaussian1D(1, 2, 3, name='g')
    p = Polynomial1D(2, name='p')
    m = g + p
    assert m[0].name == 'g'
    assert m[1].name == 'p'
    assert m['g'].name == 'g'
    assert m['p'].name == 'p'

    poly = m[1]
    m.c0_1 = 12345
    assert poly.c0 == 12345
    poly.c1 = 6789
    assert m.c1_1 == 6789

    # Ensure this did *not* modify the original models we used as templates
    assert p.c0 == 0
    assert p.c1 == 0

    # Test negative indexing
    assert isinstance(m[-1], Polynomial1D)
    assert isinstance(m[-2], Gaussian1D)

    with pytest.raises(IndexError):
        m[42]

    with pytest.raises(IndexError):
        m['foobar']


def test_basic_compound_inverse():
    """
    Test basic inversion of compound models in the limited sense supported for
    models made from compositions and joins only.
    """

    t = (Shift(2) & Shift(3)) | (Scale(2) & Scale(3)) | Rotation2D(90)
    assert_allclose(t.inverse(*t(0, 1)), (0, 1))


@pytest.mark.parametrize('model', [
    Shift(0) + Shift(0) | Shift(0),
    Shift(0) - Shift(0) | Shift(0),
    Shift(0) * Shift(0) | Shift(0),
    Shift(0) / Shift(0) | Shift(0),
    Shift(0) ** Shift(0) | Shift(0),
    Gaussian1D(1, 2, 3) | Gaussian1D(4, 5, 6)])
def test_compound_unsupported_inverse(model):
    """
    Ensure inverses aren't supported in cases where they shouldn't be.
    """

    with pytest.raises(NotImplementedError):
        model.inverse


def test_mapping_basic_permutations():
    """
    Tests a couple of basic examples of the Mapping model--specifically
    examples that merely permute the outputs.
    """

    x, y = Rotation2D(90)(1, 2)

    RS = Rotation2D | Mapping((1, 0))
    x_prime, y_prime = RS(90)(1, 2)
    assert_allclose((x, y), (y_prime, x_prime))

    # A more complicated permutation
    M = Rotation2D & Scale
    m = M(90, 2)
    x, y, z = m(1, 2, 3)

    MS = M | Mapping((2, 0, 1))
    ms = MS(90, 2)
    x_prime, y_prime, z_prime = ms(1, 2, 3)
    assert_allclose((x, y, z), (y_prime, z_prime, x_prime))


def test_mapping_inverse():
    """Tests inverting a compound model that includes a `Mapping`."""

    RS = Rotation2D & Scale

    # Rotates 2 of the coordinates and scales the third--then rotates on a
    # different axis and scales on the axis of rotation.  No physical meaning
    # here, just a simple test
    M = RS | Mapping([2, 0, 1]) | RS

    m = M(12.1, 13.2, 14.3, 15.4)

    assert_allclose((0, 1, 2), m.inverse(*m(0, 1, 2)), atol=1e-08)


def test_identity_input():
    """
    Test a case where an Identity (or Mapping) model is the first in a chain
    of composite models and thus is responsible for handling input
    broadcasting properly.

    Regression test for https://github.com/astropy/astropy/pull/3362
    """

    ident1 = Identity(1)
    shift = Shift(1)
    rotation = Rotation2D(angle=90)
    model = ident1 & shift | rotation
    assert_allclose(model(1, 2), [-3.0, 1.0])

    # Same test case but using class composition
    TestModel = ident1 & Shift | Rotation2D
    model = TestModel(offset_1=1, angle_2=90)
    assert_allclose(model(1, 2), [-3.0, 1.0])
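
# Illustrative sketch (hypothetical helper, not one of the tests): a Mapping
# simply reorders (or duplicates/drops) its inputs, so ``Mapping((1, 0))``
# swaps a coordinate pair.
def _mapping_swap_example():
    swap = Mapping((1, 0))
    assert swap(1.0, 2.0) == (2.0, 1.0)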
def test_slicing_on_instances_2():
    """
    More slicing tests.

    Regression test for https://github.com/embray/astropy/pull/10
    """

    model_a = Shift(1, name='a')
    model_b = Shift(2, name='b')
    model_c = Rotation2D(3, name='c')
    model_d = Scale(2, name='d')
    model_e = Scale(3, name='e')

    m = (model_a & model_b) | model_c | (model_d & model_e)

    with pytest.raises(ModelDefinitionError):
        # The slice can't actually be taken since the resulting model cannot
        # be evaluated
        assert m[1:].submodel_names == ('b', 'c', 'd', 'e')

    assert m[:].submodel_names == ('a', 'b', 'c', 'd', 'e')
    assert m['a':].submodel_names == ('a', 'b', 'c', 'd', 'e')

    with pytest.raises(ModelDefinitionError):
        assert m['c':'d'].submodel_names == ('c', 'd')

    assert m[1:2].name == 'b'
    assert m[2:7].submodel_names == ('c', 'd', 'e')

    with pytest.raises(IndexError):
        m['x']

    with pytest.raises(IndexError):
        m['a': 'r']

    with pytest.raises(ModelDefinitionError):
        assert m[-4:4].submodel_names == ('b', 'c', 'd')

    with pytest.raises(ModelDefinitionError):
        assert m[-4:-2].submodel_names == ('b', 'c')


def test_slicing_on_instances_3():
    """
    Like `test_slicing_on_instances_2` but uses a compound model that does not
    have any invalid slices due to the resulting model being invalid
    (originally test_slicing_on_instances_2 passed without any
    ModelDefinitionErrors being raised, but that was before we prevented
    invalid models from being created).
    """

    model_a = Shift(1, name='a')
    model_b = Shift(2, name='b')
    model_c = Gaussian1D(3, 0, 0.1, name='c')
    model_d = Scale(2, name='d')
    model_e = Scale(3, name='e')

    m = (model_a + model_b) | model_c | (model_d + model_e)

    assert m[1:].submodel_names == ('b', 'c', 'd', 'e')
    assert m[:].submodel_names == ('a', 'b', 'c', 'd', 'e')
    assert m['a':].submodel_names == ('a', 'b', 'c', 'd', 'e')
    assert m['c':'d'].submodel_names == ('c', 'd')
    assert m[1:2].name == 'b'
    assert m[2:7].submodel_names == ('c', 'd', 'e')

    with pytest.raises(IndexError):
        m['x']

    with pytest.raises(IndexError):
        m['a': 'r']

    assert m[-4:4].submodel_names == ('b', 'c', 'd')
    assert m[-4:-2].submodel_names == ('b', 'c')


def test_slicing_on_instance_with_parameterless_model():
    """
    Regression test to fix an issue where the indices attached to parameter
    names on a compound model were not handled properly when one or more
    submodels have no parameters.  This was especially evident in slicing.
    """

    p2 = Polynomial2D(1, c0_0=1, c1_0=2, c0_1=3)
    p1 = Polynomial2D(1, c0_0=1, c1_0=2, c0_1=3)
    mapping = Mapping((0, 1, 0, 1))
    offx = Shift(-2, name='x_translation')
    offy = Shift(-1, name='y_translation')
    aff = AffineTransformation2D(matrix=[[1, 2], [3, 4]], name='rotation')

    model = mapping | (p1 & p2) | (offx & offy) | aff

    assert model.param_names == ('c0_0_1', 'c1_0_1', 'c0_1_1',
                                 'c0_0_2', 'c1_0_2', 'c0_1_2',
                                 'offset_3', 'offset_4',
                                 'matrix_5', 'translation_5')
    assert model(1, 2) == (23.0, 53.0)

    m = model[3:]
    assert m.param_names == ('offset_3', 'offset_4', 'matrix_5',
                             'translation_5')
    assert m(1, 2) == (1.0, 1.0)
""" offx = Shift(1) offy = Shift(2) rot = AffineTransformation2D([[0, -1], [1, 0]]) m = (offx & offy) | rot x, y = m(0, 0) assert x == -2 assert y == 1 # make sure conversion back to scalars is working properly assert isinstance(x, float) assert isinstance(y, float) x, y = m([0, 1, 2], [0, 1, 2]) assert np.all(x == [-2, -3, -4]) assert np.all(y == [1, 2, 3]) def test_compound_model_classify_attributes(): """ Regression test for an issue raised here: https://github.com/astropy/astropy/pull/3231#discussion_r22221123 The issue is that part of the `help` implementation calls a utility function called `inspect.classify_class_attrs`, which was leading to an infinite recursion. This is a useful test in its own right just in that it tests that compound models can be introspected in some useful way without crashing--this works as sort of a test of its somewhat complicated internal state management. This test does not check any of the results of `~inspect.classify_class_attrs`, though it might be useful to at some point. """ inspect.classify_class_attrs(Gaussian1D + Gaussian1D) def test_invalid_operands(): """ Test that certain operators do not work with models whose inputs/outputs do not match up correctly. """ with pytest.raises(ModelDefinitionError): Rotation2D | Gaussian1D with pytest.raises(ModelDefinitionError): Rotation2D(90) | Gaussian1D(1, 0, 0.1) with pytest.raises(ModelDefinitionError): Rotation2D + Gaussian1D with pytest.raises(ModelDefinitionError): Rotation2D(90) + Gaussian1D(1, 0, 0.1) class _ConstraintsTestA(Model): stddev = Parameter(default=0, min=0, max=0.3) mean = Parameter(default=0, fixed=True) @staticmethod def evaluate(stddev, mean): return stddev, mean class _ConstraintsTestB(Model): mean = Parameter(default=0, fixed=True) @staticmethod def evaluate(mean): return mean @pytest.mark.parametrize('model', [Gaussian1D(bounds={'stddev': (0, 0.3)}, fixed={'mean': True}) + Gaussian1D(fixed={'mean': True}), (_ConstraintsTestA + _ConstraintsTestB)()]) def test_inherit_constraints(model): """ Various tests for copying of constraint values between compound models and their members. There are two versions of this test: One where a compound model is created from two model instances, and another where a compound model is created from two model classes that have default constraints set on some of their parameters. Regression test for https://github.com/astropy/astropy/issues/3481 """ # We have to copy the model before modifying it, otherwise the test fails # if it is run twice in a row, because the state of the model instance # would be preserved from one run to the next. model = deepcopy(model) # Lots of assertions in this test as there are multiple interfaces to # parameter constraints assert 'stddev_0' in model.bounds assert model.bounds['stddev_0'] == (0, 0.3) assert model.stddev_0.bounds == (0, 0.3) assert 'mean_0' in model.fixed assert model.fixed['mean_0'] is True assert model.mean_0.fixed is True assert 'mean_1' in model.fixed assert model.fixed['mean_1'] is True assert model.mean_1.fixed is True # Great, all the constraints were inherited properly # Now what about if we update them through the sub-models? 
    model[0].stddev.bounds = (0, 0.4)
    assert model.bounds['stddev_0'] == (0, 0.4)
    assert model.stddev_0.bounds == (0, 0.4)
    assert model[0].stddev.bounds == (0, 0.4)
    assert model[0].bounds['stddev'] == (0, 0.4)

    model[0].bounds['stddev'] = (0.1, 0.5)
    assert model.bounds['stddev_0'] == (0.1, 0.5)
    assert model.stddev_0.bounds == (0.1, 0.5)
    assert model[0].stddev.bounds == (0.1, 0.5)
    assert model[0].bounds['stddev'] == (0.1, 0.5)

    model[1].mean.fixed = False
    assert model.fixed['mean_1'] is False
    assert model.mean_1.fixed is False
    assert model[1].mean.fixed is False
    assert model[1].fixed['mean'] is False

    model[1].fixed['mean'] = True
    assert model.fixed['mean_1'] is True
    assert model.mean_1.fixed is True
    assert model[1].mean.fixed is True
    assert model[1].fixed['mean'] is True


def test_compound_custom_inverse():
    """
    Test that a compound model with a custom inverse has that inverse applied
    when the inverse of another model, of which it is a component, is
    computed.

    Regression test for https://github.com/astropy/astropy/issues/3542
    """

    poly = Polynomial1D(1, c0=1, c1=2)
    scale = Scale(1)
    shift = Shift(1)

    model1 = poly | scale
    model1.inverse = poly

    # model1 now has a custom inverse (the polynomial itself, ignoring the
    # trivial scale factor)
    model2 = shift | model1

    assert_allclose(model2.inverse(1), (poly | shift.inverse)(1))

    # Make sure an inverse is not allowed if the models were combined with the
    # wrong operator, or if one of the models doesn't have an inverse defined
    with pytest.raises(NotImplementedError):
        (shift + model1).inverse

    with pytest.raises(NotImplementedError):
        (model1 & poly).inverse


@pytest.mark.parametrize('poly', [Chebyshev2D(1, 2), Polynomial2D(2),
                                  Legendre2D(1, 2), Chebyshev1D(5),
                                  Legendre1D(5), Polynomial1D(5)])
def test_compound_with_polynomials(poly):
    """
    Tests that polynomials are scaled when used in compound models.

    Issue #3699
    """

    poly.parameters = [1, 2, 3, 4, 1, 2]
    shift = Shift(3)
    model = poly | shift
    x, y = np.mgrid[:20, :37]
    result_compound = model(x, y)
    result = shift(poly(x, y))
    assert_allclose(result, result_compound)
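
# Illustrative sketch (hypothetical helper, not one of the tests): assigning
# to ``model.inverse`` replaces the default (composed) inverse with any model
# you supply.
def _custom_inverse_example():
    line = Polynomial1D(1, c0=1, c1=2)               # y = 1 + 2x
    line.inverse = Polynomial1D(1, c0=-0.5, c1=0.5)  # x = (y - 1) / 2
    assert_allclose(line.inverse(line(3.0)), 3.0)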
# has to be defined at module level since pickling doesn't work right (in
# general) for classes defined in functions
class _TestPickleModel(Gaussian1D + Gaussian1D):
    pass


def test_pickle_compound():
    """
    Regression test for
    https://github.com/astropy/astropy/issues/3867#issuecomment-114547228
    """

    # Test pickling a compound model class
    GG = Gaussian1D + Gaussian1D
    GG2 = pickle.loads(pickle.dumps(GG))
    assert GG.param_names == GG2.param_names
    assert GG.__name__ == GG2.__name__

    # Test that it works, or at least evaluates successfully
    assert GG()(0.12345) == GG2()(0.12345)

    # Test pickling a compound model instance
    g1 = Gaussian1D(1.0, 0.0, 0.1)
    g2 = Gaussian1D([2.0, 3.0], [0.0, 0.0], [0.2, 0.3])
    m = g1 + g2
    m2 = pickle.loads(pickle.dumps(m))
    assert m.param_names == m2.param_names
    assert m.__class__.__name__ == m2.__class__.__name__
    assert np.all(m.parameters == m2.parameters)
    assert np.all(m(0) == m2(0))

    # Test pickling a concrete class
    p = pickle.dumps(_TestPickleModel, protocol=0)
    # Note: This is very dependent on the specific protocol, but the point of
    # this test is that the "concrete" model is pickled in a very simple way
    # that only specifies the module and class name, and is unpickled by
    # re-importing the class from the module in which it was defined.  This
    # should still work for concrete subclasses of compound model classes that
    # were dynamically generated through an expression
    exp = b'castropy.modeling.tests.test_compound\n_TestPickleModel\np0\n.'
    # When testing against the expected value we drop the memo length field at
    # the end, which may differ between runs
    assert p[:p.rfind(b'p')] == exp[:exp.rfind(b'p')]
    assert pickle.loads(p) is _TestPickleModel


def test_update_parameters():
    offx = Shift(1)
    scl = Scale(2)
    m = offx | scl
    assert m(1) == 4

    offx.offset = 42
    assert m(1) == 4

    m.factor_1 = 100
    assert m(1) == 200
    m2 = m | offx
    assert m2(1) == 242


def test_name():
    offx = Shift(1)
    scl = Scale(2)
    m = offx | scl
    scl.name = "scale"
    assert m._submodel_names == ('None_0', 'None_1')
    assert m.name is None
    m.name = "M"
    assert m.name == "M"
    m1 = m.rename("M1")
    assert m.name == "M"
    assert m1.name == "M1"
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import pytest
import numpy as np

from inspect import signature
from numpy.testing.utils import assert_allclose

from ..core import Model, custom_model
from ..parameters import Parameter
from .. import models


class NonFittableModel(Model):
    """An example class directly subclassing Model for testing."""

    a = Parameter()

    def __init__(self, a, model_set_axis=None):
        super().__init__(a, model_set_axis=model_set_axis)

    @staticmethod
    def evaluate():
        pass


def test_Model_instance_repr_and_str():
    m = NonFittableModel(42.5)
    assert repr(m) == "<NonFittableModel(a=42.5)>"
    assert (str(m) ==
            "Model: NonFittableModel\n"
            "Inputs: ()\n"
            "Outputs: ()\n"
            "Model set size: 1\n"
            "Parameters:\n"
            " a \n"
            " ----\n"
            " 42.5")

    assert len(m) == 1


def test_Model_array_parameter():
    model = models.Gaussian1D(4, 2, 1)
    assert_allclose(model.param_sets, [[4], [2], [1]])


def test_inputless_model():
    """
    Regression test for
    https://github.com/astropy/astropy/pull/3772#issuecomment-101821641
    """

    class TestModel(Model):
        inputs = ()
        outputs = ('y',)
        a = Parameter()

        @staticmethod
        def evaluate(a):
            return a

    m = TestModel(1)
    assert m.a == 1
    assert m() == 1

    # Test array-like output
    m = TestModel([1, 2, 3], model_set_axis=False)
    assert len(m) == 1
    assert np.all(m() == [1, 2, 3])

    # Test a model set
    m = TestModel(a=[1, 2, 3], model_set_axis=0)
    assert len(m) == 3
    assert np.all(m() == [1, 2, 3])

    # Test a model set
    m = TestModel(a=[[1, 2, 3], [4, 5, 6]], model_set_axis=0)
    assert len(m) == 2
    assert np.all(m() == [[1, 2, 3], [4, 5, 6]])


def test_ParametericModel():
    with pytest.raises(TypeError):
        models.Gaussian1D(1, 2, 3, wrong=4)
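
# Illustrative sketch (hypothetical helper, not one of the tests):
# ``custom_model`` turns a plain function into a Model class; keyword
# arguments with defaults become model parameters.
def _custom_model_basics_example():
    @custom_model
    def line(x, slope=1.0, intercept=0.0):
        return slope * x + intercept

    m = line(slope=2.0, intercept=1.0)
    assert m(3.0) == 7.0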
""" @custom_model def model_a(x): return x assert model_a.param_names == () assert model_a.n_inputs == 1 sig = signature(model_a.__init__) assert list(sig.parameters.keys()) == ['self', 'args', 'meta', 'name', 'kwargs'] sig = signature(model_a.__call__) assert list(sig.parameters.keys()) == ['self', 'x', 'model_set_axis', 'with_bounding_box', 'fill_value', 'equivalencies'] @custom_model def model_b(x, a=1, b=2): return x + a + b assert model_b.param_names == ('a', 'b') assert model_b.n_inputs == 1 sig = signature(model_b.__init__) assert list(sig.parameters.keys()) == ['self', 'a', 'b', 'kwargs'] assert [x.default for x in sig.parameters.values()] == [sig.empty, 1, 2, sig.empty] sig = signature(model_b.__call__) assert list(sig.parameters.keys()) == ['self', 'x', 'model_set_axis', 'with_bounding_box', 'fill_value', 'equivalencies'] @custom_model def model_c(x, y, a=1, b=2): return x + y + a + b assert model_c.param_names == ('a', 'b') assert model_c.n_inputs == 2 sig = signature(model_c.__init__) assert list(sig.parameters.keys()) == ['self', 'a', 'b', 'kwargs'] assert [x.default for x in sig.parameters.values()] == [sig.empty, 1, 2, sig.empty] sig = signature(model_c.__call__) assert list(sig.parameters.keys()) == ['self', 'x', 'y', 'model_set_axis', 'with_bounding_box', 'fill_value', 'equivalencies'] def test_custom_model_subclass(): """Test that custom models can be subclassed.""" @custom_model def model_a(x, a=1): return x * a class model_b(model_a): # Override the evaluate from model_a @classmethod def evaluate(cls, x, a): return -super().evaluate(x, a) b = model_b() assert b.param_names == ('a',) assert b.a == 1 assert b(1) == -1 sig = signature(model_b.__init__) assert list(sig.parameters.keys()) == ['self', 'a', 'kwargs'] sig = signature(model_b.__call__) assert list(sig.parameters.keys()) == ['self', 'x', 'model_set_axis', 'with_bounding_box', 'fill_value', 'equivalencies'] def test_custom_model_parametrized_decorator(): """Tests using custom_model as a decorator with parameters.""" def cosine(x, amplitude=1): return [amplitude * np.cos(x)] @custom_model(fit_deriv=cosine) def sine(x, amplitude=1): return amplitude * np.sin(x) assert issubclass(sine, Model) s = sine(2) assert_allclose(s(np.pi / 2), 2) assert_allclose(s.fit_deriv(0, 2), 2) def test_custom_inverse(): """Test setting a custom inverse on a model.""" p = models.Polynomial1D(1, c0=-2, c1=3) # A trivial inverse for a trivial polynomial inv = models.Polynomial1D(1, c0=(2./3.), c1=(1./3.)) with pytest.raises(NotImplementedError): p.inverse p.inverse = inv x = np.arange(100) assert_allclose(x, p(p.inverse(x))) assert_allclose(x, p.inverse(p(x))) p.inverse = None with pytest.raises(NotImplementedError): p.inverse def test_custom_inverse_reset(): """Test resetting a custom inverse to the model's default inverse.""" class TestModel(Model): inputs = () outputs = ('y',) @property def inverse(self): return models.Shift() @staticmethod def evaluate(): return 0 # The above test model has no meaning, nor does its inverse--this just # tests that setting an inverse and resetting to the default inverse works m = TestModel() assert isinstance(m.inverse, models.Shift) m.inverse = models.Scale() assert isinstance(m.inverse, models.Scale) del m.inverse assert isinstance(m.inverse, models.Shift) def test_render_model_2d(): imshape = (71, 141) image = np.zeros(imshape) coords = y, x = np.indices(imshape) model = models.Gaussian2D(x_stddev=6.1, y_stddev=3.9, theta=np.pi / 3) # test points for edges ye, xe = [0, 35, 70], [0, 70, 140] # test 
def test_render_model_2d():
    imshape = (71, 141)
    image = np.zeros(imshape)
    coords = y, x = np.indices(imshape)

    model = models.Gaussian2D(x_stddev=6.1, y_stddev=3.9, theta=np.pi / 3)

    # test points for edges
    ye, xe = [0, 35, 70], [0, 70, 140]
    # test points for floating point positions
    yf, xf = [35.1, 35.5, 35.9], [70.1, 70.5, 70.9]

    test_pts = [(a, b) for a in xe for b in ye]
    test_pts += [(a, b) for a in xf for b in yf]

    for x0, y0 in test_pts:
        model.x_mean = x0
        model.y_mean = y0
        expected = model(x, y)
        for xy in [coords, None]:
            for im in [image.copy(), None]:
                if (im is None) & (xy is None):
                    # this case is tested in Fittable2DModelTester
                    continue
                actual = model.render(out=im, coords=xy)
                if im is None:
                    assert_allclose(actual, model.render(coords=xy))
                # assert images match
                assert_allclose(expected, actual, atol=3e-7)
                # assert model fully captured
                if (x0, y0) == (70, 35):
                    boxed = model.render()
                    flux = np.sum(expected)
                    assert ((flux - np.sum(boxed)) / flux) < 1e-7

    # test that an error is raised when the bounding box is larger than the
    # output array
    with pytest.raises(ValueError):
        model.render(out=np.zeros((1, 1)))


def test_render_model_1d():
    npix = 101
    image = np.zeros(npix)
    coords = np.arange(npix)

    model = models.Gaussian1D()

    # test points
    test_pts = [0, 49.1, 49.5, 49.9, 100]

    # test widths
    test_stdv = np.arange(5.5, 6.7, .2)

    for x0, stdv in [(p, s) for p in test_pts for s in test_stdv]:
        model.mean = x0
        model.stddev = stdv
        expected = model(coords)
        for x in [coords, None]:
            for im in [image.copy(), None]:
                if (im is None) & (x is None):
                    # this case is tested in Fittable1DModelTester
                    continue
                actual = model.render(out=im, coords=x)
                # assert images match
                assert_allclose(expected, actual, atol=3e-7)
                # assert model fully captured
                if (x0, stdv) == (49.5, 5.5):
                    boxed = model.render()
                    flux = np.sum(expected)
                    assert ((flux - np.sum(boxed)) / flux) < 1e-7


def test_render_model_3d():
    imshape = (17, 21, 27)
    image = np.zeros(imshape)
    coords = np.indices(imshape)

    def ellipsoid(x, y, z, x0=13., y0=10., z0=8., a=4., b=3., c=2., amp=1.):
        rsq = ((x - x0) / a) ** 2 + ((y - y0) / b) ** 2 + ((z - z0) / c) ** 2
        val = (rsq < 1) * amp
        return val

    class Ellipsoid3D(custom_model(ellipsoid)):
        @property
        def bounding_box(self):
            return ((self.z0 - self.c, self.z0 + self.c),
                    (self.y0 - self.b, self.y0 + self.b),
                    (self.x0 - self.a, self.x0 + self.a))

    model = Ellipsoid3D()

    # test points for edges
    ze, ye, xe = [0, 8, 16], [0, 10, 20], [0, 13, 26]
    # test points for floating point positions
    zf, yf, xf = [8.1, 8.5, 8.9], [10.1, 10.5, 10.9], [13.1, 13.5, 13.9]

    test_pts = [(x, y, z) for x in xe for y in ye for z in ze]
    test_pts += [(x, y, z) for x in xf for y in yf for z in zf]

    for x0, y0, z0 in test_pts:
        model.x0 = x0
        model.y0 = y0
        model.z0 = z0
        expected = model(*coords[::-1])
        for c in [coords, None]:
            for im in [image.copy(), None]:
                if (im is None) & (c is None):
                    continue
                actual = model.render(out=im, coords=c)
                # assert images match
                assert_allclose(expected, actual)
                # assert model fully captured
                if (z0, y0, x0) == (8, 10, 13):
                    boxed = model.render()
                    assert (np.sum(expected) - np.sum(boxed)) == 0
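
# Illustrative sketch (hypothetical helper, not one of the tests): a 1D
# bounding box is a (low, high) interval around the region where the model is
# significantly non-zero, and ``render`` only evaluates inside it.
def _bounding_box_example():
    g = models.Gaussian1D(amplitude=1, mean=50, stddev=2)
    x_lo, x_hi = g.bounding_box  # an interval around the mean
    assert x_lo < 50 < x_hi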
""" # 1D models g1 = models.Gaussian1D() bb = g1.bounding_box expected = g1.render() # assign the same bounding_box, now through the bounding_box setter g1.bounding_box = bb assert_allclose(g1.render(), expected) # 2D models g2 = models.Gaussian2D() bb = g2.bounding_box expected = g2.render() # assign the same bounding_box, now through the bounding_box setter g2.bounding_box = bb assert_allclose(g2.render(), expected) def test_n_submodels_in_single_models(): assert models.Gaussian1D.n_submodels() == 1 assert models.Gaussian2D.n_submodels() == 1 def test_compound_deepcopy(): model = (models.Gaussian1D(10, 2,3) | models.Shift(2)) & models.Rotation2D(21.3) new_model = model.deepcopy() assert id(model) != id(new_model) assert id(model._submodels) != id(new_model._submodels) assert id(model._submodels[0]) != id(new_model._submodels[0]) assert id(model._submodels[1]) != id(new_model._submodels[1]) assert id(model._submodels[2]) != id(new_model._submodels[2])
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test separability of models.
"""

import pytest
import numpy as np
from numpy.testing import assert_allclose

from .. import models
from ..models import Mapping
from ..separable import (_coord_matrix, is_separable, _cdot, _cstack,
                         _arith_oper)


sh1 = models.Shift(1, name='shift1')
sh2 = models.Shift(2, name='sh2')
scl1 = models.Scale(1, name='scl1')
scl2 = models.Scale(2, name='scl2')
map1 = Mapping((0, 1, 0, 1), name='map1')
map2 = Mapping((0, 0, 1), name='map2')
map3 = Mapping((0, 0), name='map3')
rot = models.Rotation2D(2, name='rotation')
p2 = models.Polynomial2D(1, name='p2')
p22 = models.Polynomial2D(2, name='p22')
p1 = models.Polynomial1D(1, name='p1')


compound_models = {
    'cm1': (map3 & sh1 | rot & sh1 | sh1 & sh2 & sh1,
            np.array([False, False, True])
            ),
    'cm2': (sh1 & sh2 | rot | map1 | p2 & p22,
            np.array([False, False])
            ),
    'cm3': (map2 | rot & scl1,
            np.array([False, False, True])
            ),
    'cm4': (sh1 & sh2 | map2 | rot & scl1,
            np.array([False, False, True])
            ),
    'cm5': (map3 | sh1 & sh2 | scl1 & scl2,
            np.array([False, False])
            ),
    'cm7': (map2 | p2 & sh1,
            np.array([False, True])
            )
}


def test_coord_matrix():
    c = _coord_matrix(p2, 'left', 2)
    assert_allclose(np.array([[1, 1], [0, 0]]), c)
    c = _coord_matrix(p2, 'right', 2)
    assert_allclose(np.array([[0, 0], [1, 1]]), c)
    c = _coord_matrix(p1, 'left', 2)
    assert_allclose(np.array([[1], [0]]), c)
    c = _coord_matrix(p1, 'left', 1)
    assert_allclose(np.array([[1]]), c)
    c = _coord_matrix(sh1, 'left', 2)
    assert_allclose(np.array([[1], [0]]), c)
    c = _coord_matrix(sh1, 'right', 2)
    assert_allclose(np.array([[0], [1]]), c)
    c = _coord_matrix(sh1, 'right', 3)
    assert_allclose(np.array([[0], [0], [1]]), c)
    c = _coord_matrix(map3, 'left', 2)
    assert_allclose(np.array([[1], [1]]), c)
    c = _coord_matrix(map3, 'left', 3)
    assert_allclose(np.array([[1], [1], [0]]), c)


def test_cdot():
    result = _cdot(sh1, scl1)
    assert_allclose(result, np.array([[1]]))

    result = _cdot(rot, p2)
    assert_allclose(result, np.array([[2, 2]]))

    result = _cdot(rot, rot)
    assert_allclose(result, np.array([[2, 2], [2, 2]]))

    result = _cdot(Mapping((0, 0)), rot)
    assert_allclose(result, np.array([[2], [2]]))


def test_cstack():
    result = _cstack(sh1, scl1)
    assert_allclose(result, np.array([[1, 0], [0, 1]]))

    result = _cstack(sh1, rot)
    assert_allclose(result,
                    np.array([[1, 0, 0],
                              [0, 1, 1],
                              [0, 1, 1]]))

    result = _cstack(rot, sh1)
    assert_allclose(result,
                    np.array([[1, 1, 0],
                              [1, 1, 0],
                              [0, 0, 1]]))


def test_arith_oper():
    result = _arith_oper(sh1, scl1)
    assert_allclose(result, np.array([[1]]))

    result = _arith_oper(rot, rot)
    assert_allclose(result, np.array([[1, 1], [1, 1]]))


@pytest.mark.parametrize(('compound_model', 'result'),
                         compound_models.values())
def test_separable(compound_model, result):
    assert_allclose(is_separable(compound_model), result)
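
# Illustrative sketch (hypothetical helper, not one of the tests): joined
# models ('&') keep their outputs independent and stay separable, while a
# rotation mixes its two inputs and is therefore not separable.
def _separability_example():
    assert_allclose(is_separable(sh1 & sh2), np.array([True, True]))
    assert_allclose(is_separable(rot), np.array([False, False]))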
5250600817b8f3a277718909c119e1e71162419dff7c67caaa61f5b53a348fea
# Licensed under a 3-clause BSD style license - see LICENSE.rst # -*- coding: utf-8 -*- import contextlib import warnings from ...tests.helper import catch_warnings @contextlib.contextmanager def ignore_non_integer_warning(): # We need to ignore this warning on Scipy < 0.14. # When our minimum version of Scipy is bumped up, this can be # removed. with catch_warnings(): warnings.filterwarnings( "always", "using a non-integer number instead of an integer " "will result in an error in the future", DeprecationWarning) yield
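
# A hypothetical usage sketch for the context manager above (the
# Gaussian1D/SLSQPLSQFitter pairing is an illustrative assumption; any
# scipy-backed fitter that trips the old non-integer DeprecationWarning
# would do).  Under astropy's strict test configuration the "always" filter
# keeps that warning from escalating into an error.

import numpy as np
from astropy.modeling import models, fitting

x = np.linspace(-5, 5, 50)
y = np.exp(-x ** 2 / 2)
with ignore_non_integer_warning():
    # any non-integer DeprecationWarning raised inside the fit stays a warning
    fitted = fitting.SLSQPLSQFitter()(models.Gaussian1D(), x, y)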
91aadc2ff7ff8b57de275d534ecf51e88ce3b9e00882dd96cbe37b31141ab5ee
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests models.parameters
"""

import itertools

import pytest
import numpy as np
from numpy.testing import utils

from . import irafutil
from .. import models, fitting
from ..core import Model, FittableModel
from ..parameters import Parameter, InputParameterError
from ...utils.data import get_pkg_data_filename


def setter1(val):
    return val


def setter2(val, model):
    model.do_something(val)
    return val * model.p


class SetterModel(FittableModel):

    inputs = ('x', 'y')
    outputs = ('z',)

    xc = Parameter(default=1, setter=setter1)
    yc = Parameter(default=1, setter=setter2)

    def __init__(self, xc, yc, p):
        self.p = p  # p is a value intended to be used by the setter
        super().__init__()
        self.xc = xc
        self.yc = yc

    def evaluate(self, x, y, xc, yc):
        return ((x - xc)**2 + (y - yc)**2)

    def do_something(self, v):
        pass


class TParModel(Model):
    """
    A toy model to test parameters machinery
    """

    coeff = Parameter()
    e = Parameter()

    def __init__(self, coeff, e, **kwargs):
        super().__init__(coeff=coeff, e=e, **kwargs)

    @staticmethod
    def evaluate(coeff, e):
        pass


class MockModel(FittableModel):
    alpha = Parameter(name='alpha', default=42)

    @staticmethod
    def evaluate(*args):
        pass


def test_parameter_properties():
    """Test if getting / setting of Parameter properties works."""

    m = MockModel()
    p = m.alpha

    assert p.name == 'alpha'

    # Parameter names are immutable
    with pytest.raises(AttributeError):
        p.name = 'beta'

    assert p.fixed is False
    p.fixed = True
    assert p.fixed is True

    assert p.tied is False
    p.tied = lambda _: 0

    p.tied = False
    assert p.tied is False

    assert p.min is None
    p.min = 42
    assert p.min == 42
    p.min = None
    assert p.min is None

    assert p.max is None
    # TODO: shouldn't setting a max < min give an error?
    p.max = 41
    assert p.max == 41


def test_parameter_operators():
    """Test if the parameter arithmetic operators work."""

    m = MockModel()
    par = m.alpha
    num = 42.
    val = 3

    assert par - val == num - val
    assert val - par == val - num
    assert par / val == num / val
    assert val / par == val / num
    assert par ** val == num ** val
    assert val ** par == val ** num
    assert par < 45
    assert par > 41
    assert par <= par
    assert par >= par
    assert par == par
    assert -par == -num
    assert abs(par) == abs(num)


class TestParameters:

    def setup_class(self):
        """
        Unit tests for parameters

        Read an iraf database file created by onedspec.identify.  Use the
        information to create a 1D Chebyshev model and perform the same fit.

        Create also a Gaussian model.
        """
        test_file = get_pkg_data_filename('data/idcompspec.fits')
        with open(test_file) as f:
            lines = f.read()
        reclist = lines.split("begin")
        record = irafutil.IdentifyRecord(reclist[1])
        self.icoeff = record.coeff
        order = int(record.fields['order'])
        self.model = models.Chebyshev1D(order - 1)
        self.gmodel = models.Gaussian1D(2, mean=3, stddev=4)
        self.linear_fitter = fitting.LinearLSQFitter()
        self.x = record.x
        self.y = record.z
        self.yy = np.array([record.z, record.z])

    def test_set_slice(self):
        """
        Tests updating the parameters attribute with a slice.

        This is what fitters internally do.
""" self.model.parameters[:] = np.array([3, 4, 5, 6, 7]) assert (self.model.parameters == [3., 4., 5., 6., 7.]).all() def test_set_parameters_as_list(self): """Tests updating parameters using a list.""" self.model.parameters = [30, 40, 50, 60, 70] assert (self.model.parameters == [30., 40., 50., 60, 70]).all() def test_set_parameters_as_array(self): """Tests updating parameters using an array.""" self.model.parameters = np.array([3, 4, 5, 6, 7]) assert (self.model.parameters == [3., 4., 5., 6., 7.]).all() def test_set_as_tuple(self): """Tests updating parameters using a tuple.""" self.model.parameters = (1, 2, 3, 4, 5) assert (self.model.parameters == [1, 2, 3, 4, 5]).all() def test_set_model_attr_seq(self): """ Tests updating the parameters attribute when a model's parameter (in this case coeff) is updated. """ self.model.parameters = [0, 0., 0., 0, 0] self.model.c0 = 7 assert (self.model.parameters == [7, 0., 0., 0, 0]).all() def test_set_model_attr_num(self): """Update the parameter list when a model's parameter is updated.""" self.gmodel.amplitude = 7 assert (self.gmodel.parameters == [7, 3, 4]).all() def test_set_item(self): """Update the parameters using indexing.""" self.model.parameters = [1, 2, 3, 4, 5] self.model.parameters[0] = 10. assert (self.model.parameters == [10, 2, 3, 4, 5]).all() assert self.model.c0 == 10 def test_wrong_size1(self): """ Tests raising an error when attempting to reset the parameters using a list of a different size. """ with pytest.raises(InputParameterError): self.model.parameters = [1, 2, 3] def test_wrong_size2(self): """ Tests raising an exception when attempting to update a model's parameter (in this case coeff) with a sequence of the wrong size. """ with pytest.raises(InputParameterError): self.model.c0 = [1, 2, 3] def test_wrong_shape(self): """ Tests raising an exception when attempting to update a model's parameter and the new value has the wrong shape. """ with pytest.raises(InputParameterError): self.gmodel.amplitude = [1, 2] def test_par_against_iraf(self): """ Test the fitter modifies model.parameters. Uses an iraf example. 
""" new_model = self.linear_fitter(self.model, self.x, self.y) print(self.y, self.x) utils.assert_allclose(new_model.parameters, np.array( [4826.1066602783685, 952.8943813407858, 12.641236013982386, -1.7910672553339604, 0.90252884366711317]), rtol=10 ** (-2)) def testPolynomial1D(self): d = {'c0': 11, 'c1': 12, 'c2': 13, 'c3': 14} p1 = models.Polynomial1D(3, **d) utils.assert_equal(p1.parameters, [11, 12, 13, 14]) def test_poly1d_multiple_sets(self): p1 = models.Polynomial1D(3, n_models=3) utils.assert_equal(p1.parameters, [0.0, 0.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) utils.assert_array_equal(p1.c0, [0, 0, 0]) p1.c0 = [10, 10, 10] utils.assert_equal(p1.parameters, [10.0, 10.0, 10.0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) def test_par_slicing(self): """ Test assigning to a parameter slice """ p1 = models.Polynomial1D(3, n_models=3) p1.c0[:2] = [10, 10] utils.assert_equal(p1.parameters, [10.0, 10.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) def test_poly2d(self): p2 = models.Polynomial2D(degree=3) p2.c0_0 = 5 utils.assert_equal(p2.parameters, [5, 0, 0, 0, 0, 0, 0, 0, 0, 0]) def test_poly2d_multiple_sets(self): kw = {'c0_0': [2, 3], 'c1_0': [1, 2], 'c2_0': [4, 5], 'c0_1': [1, 1], 'c0_2': [2, 2], 'c1_1': [5, 5]} p2 = models.Polynomial2D(2, **kw) utils.assert_equal(p2.parameters, [2, 3, 1, 2, 4, 5, 1, 1, 2, 2, 5, 5]) def test_shift_model_parameters1d(self): sh1 = models.Shift(2) sh1.offset = 3 assert sh1.offset == 3 assert sh1.offset.value == 3 def test_scale_model_parametersnd(self): sc1 = models.Scale([2, 2]) sc1.factor = [3, 3] assert np.all(sc1.factor == [3, 3]) utils.assert_array_equal(sc1.factor.value, [3, 3]) def test_parameters_wrong_shape(self): sh1 = models.Shift(2) with pytest.raises(InputParameterError): sh1.offset = [3, 3] class TestMultipleParameterSets: def setup_class(self): self.x1 = np.arange(1, 10, .1) self.y, self.x = np.mgrid[:10, :7] self.x11 = np.array([self.x1, self.x1]).T self.gmodel = models.Gaussian1D([12, 10], [3.5, 5.2], stddev=[.4, .7], n_models=2) def test_change_par(self): """ Test that a change to one parameter as a set propagates to param_sets. """ self.gmodel.amplitude = [1, 10] utils.assert_almost_equal( self.gmodel.param_sets, np.array([[1., 10], [3.5, 5.2], [0.4, 0.7]])) np.all(self.gmodel.parameters == [1.0, 10.0, 3.5, 5.2, 0.4, 0.7]) def test_change_par2(self): """ Test that a change to one single parameter in a set propagates to param_sets. """ self.gmodel.amplitude[0] = 11 utils.assert_almost_equal( self.gmodel.param_sets, np.array([[11., 10], [3.5, 5.2], [0.4, 0.7]])) np.all(self.gmodel.parameters == [11.0, 10.0, 3.5, 5.2, 0.4, 0.7]) def test_change_parameters(self): self.gmodel.parameters = [13, 10, 9, 5.2, 0.4, 0.7] utils.assert_almost_equal(self.gmodel.amplitude.value, [13., 10.]) utils.assert_almost_equal(self.gmodel.mean.value, [9., 5.2]) class TestParameterInitialization: """ This suite of tests checks most if not all cases if instantiating a model with parameters of different shapes/sizes and with different numbers of parameter sets. 
""" def test_single_model_scalar_parameters(self): t = TParModel(10, 1) assert len(t) == 1 assert t.model_set_axis is False assert np.all(t.param_sets == [[10], [1]]) assert np.all(t.parameters == [10, 1]) assert t.coeff.shape == () assert t.e.shape == () def test_single_model_scalar_and_array_parameters(self): t = TParModel(10, [1, 2]) assert len(t) == 1 assert t.model_set_axis is False assert np.issubdtype(t.param_sets.dtype, np.object_) assert len(t.param_sets) == 2 assert np.all(t.param_sets[0] == [10]) assert np.all(t.param_sets[1] == [[1, 2]]) assert np.all(t.parameters == [10, 1, 2]) assert t.coeff.shape == () assert t.e.shape == (2,) def test_single_model_1d_array_parameters(self): t = TParModel([10, 20], [1, 2]) assert len(t) == 1 assert t.model_set_axis is False assert np.all(t.param_sets == [[[10, 20]], [[1, 2]]]) assert np.all(t.parameters == [10, 20, 1, 2]) assert t.coeff.shape == (2,) assert t.e.shape == (2,) def test_single_model_1d_array_different_length_parameters(self): with pytest.raises(InputParameterError): # Not broadcastable t = TParModel([1, 2], [3, 4, 5]) def test_single_model_2d_array_parameters(self): t = TParModel([[10, 20], [30, 40]], [[1, 2], [3, 4]]) assert len(t) == 1 assert t.model_set_axis is False assert np.all(t.param_sets == [[[[10, 20], [30, 40]]], [[[1, 2], [3, 4]]]]) assert np.all(t.parameters == [10, 20, 30, 40, 1, 2, 3, 4]) assert t.coeff.shape == (2, 2) assert t.e.shape == (2, 2) def test_single_model_2d_non_square_parameters(self): coeff = np.array([[10, 20], [30, 40], [50, 60]]) e = np.array([[1, 2], [3, 4], [5, 6]]) t = TParModel(coeff, e) assert len(t) == 1 assert t.model_set_axis is False assert np.all(t.param_sets == [[[[10, 20], [30, 40], [50, 60]]], [[[1, 2], [3, 4], [5, 6]]]]) assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 1, 2, 3, 4, 5, 6]) assert t.coeff.shape == (3, 2) assert t.e.shape == (3, 2) t2 = TParModel(coeff.T, e.T) assert len(t2) == 1 assert t2.model_set_axis is False assert np.all(t2.param_sets == [[[[10, 30, 50], [20, 40, 60]]], [[[1, 3, 5], [2, 4, 6]]]]) assert np.all(t2.parameters == [10, 30, 50, 20, 40, 60, 1, 3, 5, 2, 4, 6]) assert t2.coeff.shape == (2, 3) assert t2.e.shape == (2, 3) # Not broadcastable with pytest.raises(InputParameterError): TParModel(coeff, e.T) with pytest.raises(InputParameterError): TParModel(coeff.T, e) def test_single_model_2d_broadcastable_parameters(self): t = TParModel([[10, 20, 30], [40, 50, 60]], [1, 2, 3]) assert len(t) == 1 assert t.model_set_axis is False assert len(t.param_sets) == 2 assert np.issubdtype(t.param_sets.dtype, np.object_) assert np.all(t.param_sets[0] == [[[10, 20, 30], [40, 50, 60]]]) assert np.all(t.param_sets[1] == [[1, 2, 3]]) assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 1, 2, 3]) @pytest.mark.parametrize(('p1', 'p2'), [ (1, 2), (1, [2, 3]), ([1, 2], 3), ([1, 2, 3], [4, 5]), ([1, 2], [3, 4, 5])]) def test_two_model_incorrect_scalar_parameters(self, p1, p2): with pytest.raises(InputParameterError): TParModel(p1, p2, n_models=2) @pytest.mark.parametrize('kwargs', [ {'n_models': 2}, {'model_set_axis': 0}, {'n_models': 2, 'model_set_axis': 0}]) def test_two_model_scalar_parameters(self, kwargs): t = TParModel([10, 20], [1, 2], **kwargs) assert len(t) == 2 assert t.model_set_axis == 0 assert np.all(t.param_sets == [[10, 20], [1, 2]]) assert np.all(t.parameters == [10, 20, 1, 2]) assert t.coeff.shape == () assert t.e.shape == () @pytest.mark.parametrize('kwargs', [ {'n_models': 2}, {'model_set_axis': 0}, {'n_models': 2, 'model_set_axis': 0}]) def 
test_two_model_scalar_and_array_parameters(self, kwargs): t = TParModel([10, 20], [[1, 2], [3, 4]], **kwargs) assert len(t) == 2 assert t.model_set_axis == 0 assert len(t.param_sets) == 2 assert np.issubdtype(t.param_sets.dtype, np.object_) assert np.all(t.param_sets[0] == [[10], [20]]) assert np.all(t.param_sets[1] == [[1, 2], [3, 4]]) assert np.all(t.parameters == [10, 20, 1, 2, 3, 4]) assert t.coeff.shape == () assert t.e.shape == (2,) def test_two_model_1d_array_parameters(self): t = TParModel([[10, 20], [30, 40]], [[1, 2], [3, 4]], n_models=2) assert len(t) == 2 assert t.model_set_axis == 0 assert np.all(t.param_sets == [[[10, 20], [30, 40]], [[1, 2], [3, 4]]]) assert np.all(t.parameters == [10, 20, 30, 40, 1, 2, 3, 4]) assert t.coeff.shape == (2,) assert t.e.shape == (2,) t2 = TParModel([[10, 20, 30], [40, 50, 60]], [[1, 2, 3], [4, 5, 6]], n_models=2) assert len(t2) == 2 assert t2.model_set_axis == 0 assert np.all(t2.param_sets == [[[10, 20, 30], [40, 50, 60]], [[1, 2, 3], [4, 5, 6]]]) assert np.all(t2.parameters == [10, 20, 30, 40, 50, 60, 1, 2, 3, 4, 5, 6]) assert t2.coeff.shape == (3,) assert t2.e.shape == (3,) def test_two_model_mixed_dimension_array_parameters(self): with pytest.raises(InputParameterError): # Can't broadcast different array shapes TParModel([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[9, 10, 11], [12, 13, 14]], n_models=2) t = TParModel([[[10, 20], [30, 40]], [[50, 60], [70, 80]]], [[1, 2], [3, 4]], n_models=2) assert len(t) == 2 assert t.model_set_axis == 0 assert len(t.param_sets) == 2 assert np.issubdtype(t.param_sets.dtype, np.object_) assert np.all(t.param_sets[0] == [[[10, 20], [30, 40]], [[50, 60], [70, 80]]]) assert np.all(t.param_sets[1] == [[[1, 2]], [[3, 4]]]) assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 70, 80, 1, 2, 3, 4]) assert t.coeff.shape == (2, 2) assert t.e.shape == (2,) def test_two_model_2d_array_parameters(self): t = TParModel([[[10, 20], [30, 40]], [[50, 60], [70, 80]]], [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], n_models=2) assert len(t) == 2 assert t.model_set_axis == 0 assert np.all(t.param_sets == [[[[10, 20], [30, 40]], [[50, 60], [70, 80]]], [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]]) assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 70, 80, 1, 2, 3, 4, 5, 6, 7, 8]) assert t.coeff.shape == (2, 2) assert t.e.shape == (2, 2) def test_two_model_nonzero_model_set_axis(self): # An example where the model set axis is the *last* axis of the # parameter arrays coeff = np.array([[[10, 20], [30, 40]], [[50, 60], [70, 80]]]) coeff = np.rollaxis(coeff, 0, 3) e = np.array([[1, 2], [3, 4]]) e = np.rollaxis(e, 0, 2) t = TParModel(coeff, e, model_set_axis=-1) assert len(t) == 2 assert t.model_set_axis == -1 assert len(t.param_sets) == 2 assert np.issubdtype(t.param_sets.dtype, np.object_) assert np.all(t.param_sets[0] == [[[10, 50], [20, 60]], [[30, 70], [40, 80]]]) assert np.all(t.param_sets[1] == [[[1, 3], [2, 4]]]) assert np.all(t.parameters == [10, 50, 20, 60, 30, 70, 40, 80, 1, 3, 2, 4]) assert t.coeff.shape == (2, 2) assert t.e.shape == (2,) def test_wrong_number_of_params(self): with pytest.raises(InputParameterError): TParModel(coeff=[[1, 2], [3, 4]], e=(2, 3, 4), n_models=2) with pytest.raises(InputParameterError): TParModel(coeff=[[1, 2], [3, 4]], e=(2, 3, 4), model_set_axis=0) def test_wrong_number_of_params2(self): with pytest.raises(InputParameterError): m = TParModel(coeff=[[1, 2], [3, 4]], e=4, n_models=2) with pytest.raises(InputParameterError): m = TParModel(coeff=[[1, 2], [3, 4]], e=4, model_set_axis=0) def 
test_array_parameter1(self): with pytest.raises(InputParameterError): t = TParModel(np.array([[1, 2], [3, 4]]), 1, model_set_axis=0) def test_array_parameter2(self): with pytest.raises(InputParameterError): m = TParModel(np.array([[1, 2], [3, 4]]), (1, 1, 11), model_set_axis=0) def test_array_parameter4(self): """ Test multiple parameter model with array-valued parameters of the same size as the number of parameter sets. """ t4 = TParModel([[1, 2], [3, 4]], [5, 6], model_set_axis=False) assert len(t4) == 1 assert t4.coeff.shape == (2, 2) assert t4.e.shape == (2,) assert np.issubdtype(t4.param_sets.dtype, np.object_) assert np.all(t4.param_sets[0] == [[1, 2], [3, 4]]) assert np.all(t4.param_sets[1] == [5, 6]) def test_non_broadcasting_parameters(): """ Tests that in a model with 3 parameters that do not all mutually broadcast, this is determined correctly regardless of what order the parameters are in. """ a = 3 b = np.array([[1, 2, 3], [4, 5, 6]]) c = np.array([[1, 2, 3, 4], [1, 2, 3, 4]]) class TestModel(Model): p1 = Parameter() p2 = Parameter() p3 = Parameter() def evaluate(self, *args): return # a broadcasts with both b and c, but b does not broadcast with c for args in itertools.permutations((a, b, c)): with pytest.raises(InputParameterError): TestModel(*args) def test_setter(): pars = np.random.rand(20).reshape((10, 2)) model = SetterModel(-1, 3, np.pi) for x, y in pars: model.x = x model.y = y utils.assert_almost_equal(model(x, y), (x + 1)**2 + (y - np.pi * 3)**2)
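
# The Parameter machinery exercised throughout this module can be summarised
# in a short sketch (assumptions: only the public astropy.modeling API; the
# Line model is hypothetical).  Class-level Parameter descriptors supply
# defaults and constraints, and the flat parameters array stays in sync with
# the named attributes.

from astropy.modeling import Fittable1DModel, Parameter


class Line(Fittable1DModel):
    slope = Parameter(default=1)
    intercept = Parameter(default=0, fixed=True)

    @staticmethod
    def evaluate(x, slope, intercept):
        return slope * x + intercept


m = Line(slope=2.5)
m.slope.bounds = (0, None)    # per-parameter constraint, mirrored in m.bounds
m.parameters = [3.0, 1.0]     # the flat view updates the named parameters
assert m.slope.value == 3.0 and m.intercept.value == 1.0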
35bc8a017ab5f1b554fae340cf024774ffe7dc2440ced86793cf7ad606617317
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from math import cos, sin

import pytest
import numpy as np
from numpy.testing import utils

from .. import models
from ...wcs import wcs


@pytest.mark.parametrize(('inp'), [(0, 0), (4000, -20.56), (-2001.5, 45.9),
                                   (0, 90), (0, -90), (np.mgrid[:4, :6])])
def test_against_wcslib(inp):
    w = wcs.WCS()
    crval = [202.4823228, 47.17511893]
    w.wcs.crval = crval
    w.wcs.ctype = ['RA---TAN', 'DEC--TAN']

    lonpole = 180
    tan = models.Pix2Sky_TAN()
    n2c = models.RotateNative2Celestial(crval[0], crval[1], lonpole)
    c2n = models.RotateCelestial2Native(crval[0], crval[1], lonpole)
    m = tan | n2c
    minv = c2n | tan.inverse

    radec = w.wcs_pix2world(inp[0], inp[1], 1)
    xy = w.wcs_world2pix(radec[0], radec[1], 1)

    utils.assert_allclose(m(*inp), radec, atol=1e-12)
    utils.assert_allclose(minv(*radec), xy, atol=1e-12)


@pytest.mark.parametrize(('inp'), [(0, 0), (40, -20.56), (21.5, 45.9)])
def test_roundtrip_sky_rotation(inp):
    lon, lat, lon_pole = 42, 43, 44
    n2c = models.RotateNative2Celestial(lon, lat, lon_pole)
    c2n = models.RotateCelestial2Native(lon, lat, lon_pole)
    utils.assert_allclose(n2c.inverse(*n2c(*inp)), inp, atol=1e-13)
    utils.assert_allclose(c2n.inverse(*c2n(*inp)), inp, atol=1e-13)


def test_native_celestial_lat90():
    n2c = models.RotateNative2Celestial(1, 90, 0)
    alpha, delta = n2c(1, 1)
    utils.assert_allclose(delta, 1)
    utils.assert_allclose(alpha, 182)


def test_Rotation2D():
    model = models.Rotation2D(angle=90)
    x, y = model(1, 0)
    utils.assert_allclose([x, y], [0, 1], atol=1e-10)


def test_Rotation2D_inverse():
    model = models.Rotation2D(angle=234.23494)
    x, y = model.inverse(*model(1, 0))
    utils.assert_allclose([x, y], [1, 0], atol=1e-10)


def test_euler_angle_rotations():
    x = (0, 0)
    y = (90, 0)
    z = (0, 90)
    negx = (180, 0)
    negy = (-90, 0)

    # rotate y into minus z
    model = models.EulerAngleRotation(0, 90, 0, 'zxz')
    utils.assert_allclose(model(*z), y, atol=10**-12)
    # rotate z into minus x
    model = models.EulerAngleRotation(0, 90, 0, 'zyz')
    utils.assert_allclose(model(*z), negx, atol=10**-12)
    # rotate x into minus y
    model = models.EulerAngleRotation(0, 90, 0, 'yzy')
    utils.assert_allclose(model(*x), negy, atol=10**-12)


euler_axes_order = ['zxz', 'zyz', 'yzy', 'yxy', 'xyx', 'xzx']


@pytest.mark.parametrize(('axes_order'), euler_axes_order)
def test_euler_angles(axes_order):
    """
    Tests against all Euler sequences.
    The rotation matrices definitions come from Wikipedia.
""" phi = np.deg2rad(23.4) theta = np.deg2rad(12.2) psi = np.deg2rad(34) c1 = cos(phi) c2 = cos(theta) c3 = cos(psi) s1 = sin(phi) s2 = sin(theta) s3 = sin(psi) matrices = {'zxz': np.array([[(c1*c3 - c2*s1*s3), (-c1*s3 - c2*c3*s1), (s1*s2)], [(c3*s1 + c1*c2*s3), (c1*c2*c3 - s1*s3), (-c1*s2)], [(s2*s3), (c3*s2), (c2)]]), 'zyz': np.array([[(c1*c2*c3 - s1*s3), (-c3*s1 - c1*c2*s3), (c1*s2)], [(c1*s3 + c2*c3*s1), (c1*c3 - c2*s1*s3), (s1*s2)], [(-c3*s2), (s2*s3), (c2)]]), 'yzy': np.array([[(c1*c2*c3 - s1*s3), (-c1*s2), (c3*s1+c1*c2*s3)], [(c3*s2), (c2), (s2*s3)], [(-c1*s3 - c2*c3*s1), (s1*s2), (c1*c3-c2*s1*s3)]]), 'yxy': np.array([[(c1*c3 - c2*s1*s3), (s1*s2), (c1*s3+c2*c3*s1)], [(s2*s3), (c2), (-c3*s2)], [(-c3*s1 - c1*c2*s3), (c1*s2), (c1*c2*c3 - s1*s3)]]), 'xyx': np.array([[(c2), (s2*s3), (c3*s2)], [(s1*s2), (c1*c3 - c2*s1*s3), (-c1*s3 - c2*c3*s1)], [(-c1*s2), (c3*s1 + c1*c2*s3), (c1*c2*c3 - s1*s3)]]), 'xzx': np.array([[(c2), (-c3*s2), (s2*s3)], [(c1*s2), (c1*c2*c3 - s1*s3), (-c3*s1 - c1*c2*s3)], [(s1*s2), (c1*s3 + c2*c3*s1), (c1*c3 - c2*s1*s3)]]) } model = models.EulerAngleRotation(23.4, 12.2, 34, axes_order) mat = model._create_matrix(phi, theta, psi, axes_order) utils.assert_allclose(mat.T, matrices[axes_order]) # get_rotation_matrix(axes_order))
fe180042a337f84ae7b873e208b39ed3bbd51517989b50248863839a85543690
# Licensed under a 3-clause BSD style license - see LICENSE.rst import types import pytest import numpy as np from numpy.testing import utils from numpy.random import RandomState from ..core import Fittable1DModel from ..parameters import Parameter from .. import models from .. import fitting from .utils import ignore_non_integer_warning try: from scipy import optimize HAS_SCIPY = True except ImportError: HAS_SCIPY = False class TestNonLinearConstraints: def setup_class(self): self.g1 = models.Gaussian1D(10, 14.9, stddev=.3) self.g2 = models.Gaussian1D(10, 13, stddev=.4) self.x = np.arange(10, 20, .1) self.y1 = self.g1(self.x) self.y2 = self.g2(self.x) rsn = RandomState(1234567890) self.n = rsn.randn(100) self.ny1 = self.y1 + 2 * self.n self.ny2 = self.y2 + 2 * self.n @pytest.mark.skipif('not HAS_SCIPY') def test_fixed_par(self): g1 = models.Gaussian1D(10, mean=14.9, stddev=.3, fixed={'amplitude': True}) fitter = fitting.LevMarLSQFitter() model = fitter(g1, self.x, self.ny1) assert model.amplitude.value == 10 @pytest.mark.skipif('not HAS_SCIPY') def test_tied_par(self): def tied(model): mean = 50 * model.stddev return mean g1 = models.Gaussian1D(10, mean=14.9, stddev=.3, tied={'mean': tied}) fitter = fitting.LevMarLSQFitter() model = fitter(g1, self.x, self.ny1) utils.assert_allclose(model.mean.value, 50 * model.stddev, rtol=10 ** (-5)) @pytest.mark.skipif('not HAS_SCIPY') def test_joint_fitter(self): g1 = models.Gaussian1D(10, 14.9, stddev=.3) g2 = models.Gaussian1D(10, 13, stddev=.4) jf = fitting.JointFitter([g1, g2], {g1: ['amplitude'], g2: ['amplitude']}, [9.8]) x = np.arange(10, 20, .1) y1 = g1(x) y2 = g2(x) n = np.random.randn(100) ny1 = y1 + 2 * n ny2 = y2 + 2 * n jf(x, ny1, x, ny2) p1 = [14.9, .3] p2 = [13, .4] A = 9.8 p = np.r_[A, p1, p2] def compmodel(A, p, x): return A * np.exp(-0.5 / p[1] ** 2 * (x - p[0]) ** 2) def errf(p, x1, y1, x2, y2): return np.ravel( np.r_[compmodel(p[0], p[1:3], x1) - y1, compmodel(p[0], p[3:], x2) - y2]) fitparams, _ = optimize.leastsq(errf, p, args=(x, ny1, x, ny2)) utils.assert_allclose(jf.fitparams, fitparams, rtol=10 ** (-5)) utils.assert_allclose(g1.amplitude.value, g2.amplitude.value) @pytest.mark.skipif('not HAS_SCIPY') def test_no_constraints(self): g1 = models.Gaussian1D(9.9, 14.5, stddev=.3) def func(p, x): return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2) def errf(p, x, y): return func(p, x) - y p0 = [9.9, 14.5, 0.3] y = g1(self.x) n = np.random.randn(100) ny = y + n fitpar, s = optimize.leastsq(errf, p0, args=(self.x, ny)) fitter = fitting.LevMarLSQFitter() model = fitter(g1, self.x, ny) utils.assert_allclose(model.parameters, fitpar, rtol=5 * 10 ** (-3)) @pytest.mark.skipif('not HAS_SCIPY') class TestBounds: def setup_class(self): A = -2.0 B = 0.5 self.x = np.linspace(-1.0, 1.0, 100) self.y = A * self.x + B + np.random.normal(scale=0.1, size=100) data = np.array([505.0, 556.0, 630.0, 595.0, 561.0, 553.0, 543.0, 496.0, 460.0, 469.0, 426.0, 518.0, 684.0, 798.0, 830.0, 794.0, 649.0, 706.0, 671.0, 545.0, 479.0, 454.0, 505.0, 700.0, 1058.0, 1231.0, 1325.0, 997.0, 1036.0, 884.0, 610.0, 487.0, 453.0, 527.0, 780.0, 1094.0, 1983.0, 1993.0, 1809.0, 1525.0, 1056.0, 895.0, 604.0, 466.0, 510.0, 678.0, 1130.0, 1986.0, 2670.0, 2535.0, 1878.0, 1450.0, 1200.0, 663.0, 511.0, 474.0, 569.0, 848.0, 1670.0, 2611.0, 3129.0, 2507.0, 1782.0, 1211.0, 723.0, 541.0, 511.0, 518.0, 597.0, 1137.0, 1993.0, 2925.0, 2438.0, 1910.0, 1230.0, 738.0, 506.0, 461.0, 486.0, 597.0, 733.0, 1262.0, 1896.0, 2342.0, 1792.0, 1180.0, 667.0, 482.0, 454.0, 482.0, 504.0, 566.0, 
789.0, 1194.0, 1545.0, 1361.0, 933.0, 562.0, 418.0, 463.0, 435.0, 466.0, 528.0, 487.0, 664.0, 799.0, 746.0, 550.0, 478.0, 535.0, 443.0, 416.0, 439.0, 472.0, 472.0, 492.0, 523.0, 569.0, 487.0, 441.0, 428.0]) self.data = data.reshape(11, 11) def test_bounds_lsq(self): guess_slope = 1.1 guess_intercept = 0.0 bounds = {'slope': (-1.5, 5.0), 'intercept': (-1.0, 1.0)} line_model = models.Linear1D(guess_slope, guess_intercept, bounds=bounds) fitter = fitting.LevMarLSQFitter() model = fitter(line_model, self.x, self.y) slope = model.slope.value intercept = model.intercept.value assert slope + 10 ** -5 >= bounds['slope'][0] assert slope - 10 ** -5 <= bounds['slope'][1] assert intercept + 10 ** -5 >= bounds['intercept'][0] assert intercept - 10 ** -5 <= bounds['intercept'][1] def test_bounds_slsqp(self): guess_slope = 1.1 guess_intercept = 0.0 bounds = {'slope': (-1.5, 5.0), 'intercept': (-1.0, 1.0)} line_model = models.Linear1D(guess_slope, guess_intercept, bounds=bounds) fitter = fitting.SLSQPLSQFitter() with ignore_non_integer_warning(): model = fitter(line_model, self.x, self.y) slope = model.slope.value intercept = model.intercept.value assert slope + 10 ** -5 >= bounds['slope'][0] assert slope - 10 ** -5 <= bounds['slope'][1] assert intercept + 10 ** -5 >= bounds['intercept'][0] assert intercept - 10 ** -5 <= bounds['intercept'][1] def test_bounds_gauss2d_lsq(self): X, Y = np.meshgrid(np.arange(11), np.arange(11)) bounds = {"x_mean": [0., 11.], "y_mean": [0., 11.], "x_stddev": [1., 4], "y_stddev": [1., 4]} gauss = models.Gaussian2D(amplitude=10., x_mean=5., y_mean=5., x_stddev=4., y_stddev=4., theta=0.5, bounds=bounds) gauss_fit = fitting.LevMarLSQFitter() model = gauss_fit(gauss, X, Y, self.data) x_mean = model.x_mean.value y_mean = model.y_mean.value x_stddev = model.x_stddev.value y_stddev = model.y_stddev.value assert x_mean + 10 ** -5 >= bounds['x_mean'][0] assert x_mean - 10 ** -5 <= bounds['x_mean'][1] assert y_mean + 10 ** -5 >= bounds['y_mean'][0] assert y_mean - 10 ** -5 <= bounds['y_mean'][1] assert x_stddev + 10 ** -5 >= bounds['x_stddev'][0] assert x_stddev - 10 ** -5 <= bounds['x_stddev'][1] assert y_stddev + 10 ** -5 >= bounds['y_stddev'][0] assert y_stddev - 10 ** -5 <= bounds['y_stddev'][1] def test_bounds_gauss2d_slsqp(self): X, Y = np.meshgrid(np.arange(11), np.arange(11)) bounds = {"x_mean": [0., 11.], "y_mean": [0., 11.], "x_stddev": [1., 4], "y_stddev": [1., 4]} gauss = models.Gaussian2D(amplitude=10., x_mean=5., y_mean=5., x_stddev=4., y_stddev=4., theta=0.5, bounds=bounds) gauss_fit = fitting.SLSQPLSQFitter() with ignore_non_integer_warning(): model = gauss_fit(gauss, X, Y, self.data) x_mean = model.x_mean.value y_mean = model.y_mean.value x_stddev = model.x_stddev.value y_stddev = model.y_stddev.value assert x_mean + 10 ** -5 >= bounds['x_mean'][0] assert x_mean - 10 ** -5 <= bounds['x_mean'][1] assert y_mean + 10 ** -5 >= bounds['y_mean'][0] assert y_mean - 10 ** -5 <= bounds['y_mean'][1] assert x_stddev + 10 ** -5 >= bounds['x_stddev'][0] assert x_stddev - 10 ** -5 <= bounds['x_stddev'][1] assert y_stddev + 10 ** -5 >= bounds['y_stddev'][0] assert y_stddev - 10 ** -5 <= bounds['y_stddev'][1] class TestLinearConstraints: def setup_class(self): self.p1 = models.Polynomial1D(4) self.p1.c0 = 0 self.p1.c1 = 0 self.p1.window = [0., 9.] 
self.x = np.arange(10) self.y = self.p1(self.x) rsn = RandomState(1234567890) self.n = rsn.randn(10) self.ny = self.y + self.n def test(self): self.p1.c0.fixed = True self.p1.c1.fixed = True pfit = fitting.LinearLSQFitter() model = pfit(self.p1, self.x, self.y) utils.assert_allclose(self.y, model(self.x)) # Test constraints as parameter properties def test_set_fixed_1(): gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1) gauss.mean.fixed = True assert gauss.fixed == {'amplitude': False, 'mean': True, 'stddev': False} def test_set_fixed_2(): gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1, fixed={'mean': True}) assert gauss.mean.fixed is True def test_set_tied_1(): def tie_amplitude(model): return 50 * model.stddev gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1) gauss.amplitude.tied = tie_amplitude assert gauss.amplitude.tied is not False assert isinstance(gauss.tied['amplitude'], types.FunctionType) def test_set_tied_2(): def tie_amplitude(model): return 50 * model.stddev gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1, tied={'amplitude': tie_amplitude}) assert gauss.amplitude.tied def test_unset_fixed(): gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1, fixed={'mean': True}) gauss.mean.fixed = False assert gauss.fixed == {'amplitude': False, 'mean': False, 'stddev': False} def test_unset_tied(): def tie_amplitude(model): return 50 * model.stddev gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1, tied={'amplitude': tie_amplitude}) gauss.amplitude.tied = False assert gauss.tied == {'amplitude': False, 'mean': False, 'stddev': False} def test_set_bounds_1(): gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1, bounds={'stddev': (0, None)}) assert gauss.bounds == {'amplitude': (None, None), 'mean': (None, None), 'stddev': (0.0, None)} def test_set_bounds_2(): gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1) gauss.stddev.min = 0. assert gauss.bounds == {'amplitude': (None, None), 'mean': (None, None), 'stddev': (0.0, None)} def test_unset_bounds(): gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1, bounds={'stddev': (0, 2)}) gauss.stddev.min = None gauss.stddev.max = None assert gauss.bounds == {'amplitude': (None, None), 'mean': (None, None), 'stddev': (None, None)} def test_default_constraints(): """Regression test for https://github.com/astropy/astropy/issues/2396 Ensure that default constraints defined on parameters are carried through to instances of the models those parameters are defined for. 
""" class MyModel(Fittable1DModel): a = Parameter(default=1) b = Parameter(default=0, min=0, fixed=True) @staticmethod def evaluate(x, a, b): return x * a + b assert MyModel.a.default == 1 assert MyModel.b.default == 0 assert MyModel.b.min == 0 assert MyModel.b.bounds == (0, None) assert MyModel.b.fixed is True m = MyModel() assert m.a.value == 1 assert m.b.value == 0 assert m.b.min == 0 assert m.b.bounds == (0, None) assert m.b.fixed is True assert m.bounds == {'a': (None, None), 'b': (0, None)} assert m.fixed == {'a': False, 'b': True} # Make a model instance that overrides the default constraints and values m = MyModel(3, 4, bounds={'a': (1, None), 'b': (2, None)}, fixed={'a': True, 'b': False}) assert m.a.value == 3 assert m.b.value == 4 assert m.a.min == 1 assert m.b.min == 2 assert m.a.bounds == (1, None) assert m.b.bounds == (2, None) assert m.a.fixed is True assert m.b.fixed is False assert m.bounds == {'a': (1, None), 'b': (2, None)} assert m.fixed == {'a': True, 'b': False} @pytest.mark.skipif('not HAS_SCIPY') def test_fit_with_fixed_and_bound_constraints(): """ Regression test for https://github.com/astropy/astropy/issues/2235 Currently doesn't test that the fit is any *good*--just that parameters stay within their given constraints. """ m = models.Gaussian1D(amplitude=3, mean=4, stddev=1, bounds={'mean': (4, 5)}, fixed={'amplitude': True}) x = np.linspace(0, 10, 10) y = np.exp(-x ** 2 / 2) f = fitting.LevMarLSQFitter() fitted_1 = f(m, x, y) assert fitted_1.mean >= 4 assert fitted_1.mean <= 5 assert fitted_1.amplitude == 3.0 m.amplitude.fixed = False fitted_2 = f(m, x, y) # It doesn't matter anymore what the amplitude ends up as so long as the # bounds constraint was still obeyed assert fitted_1.mean >= 4 assert fitted_1.mean <= 5 @pytest.mark.skipif('not HAS_SCIPY') def test_fit_with_bound_constraints_estimate_jacobian(): """ Regression test for https://github.com/astropy/astropy/issues/2400 Checks that bounds constraints are obeyed on a custom model that does not define fit_deriv (and thus its Jacobian must be estimated for non-linear fitting). """ class MyModel(Fittable1DModel): a = Parameter(default=1) b = Parameter(default=2) @staticmethod def evaluate(x, a, b): return a * x + b m_real = MyModel(a=1.5, b=-3) x = np.arange(100) y = m_real(x) m = MyModel() f = fitting.LevMarLSQFitter() fitted_1 = f(m, x, y) # This fit should be trivial so even without constraints on the bounds it # should be right assert np.allclose(fitted_1.a, 1.5) assert np.allclose(fitted_1.b, -3) m2 = MyModel() m2.a.bounds = (-2, 2) f2 = fitting.LevMarLSQFitter() fitted_2 = f2(m2, x, y) assert np.allclose(fitted_1.a, 1.5) assert np.allclose(fitted_1.b, -3) # Check that the estimated Jacobian was computed (it doesn't matter what # the values are so long as they're not all zero. 
assert np.any(f2.fit_info['fjac'] != 0) # https://github.com/astropy/astropy/issues/6014 @pytest.mark.skipif('not HAS_SCIPY') def test_gaussian2d_positive_stddev(): # This is 2D Gaussian with noise to be fitted, as provided by @ysBach test = [ [-54.33, 13.81, -34.55, 8.95, -143.71, -0.81, 59.25, -14.78, -204.9, -30.87, -124.39, 123.53, 70.81, -109.48, -106.77, 35.64, 18.29], [-126.19, -89.13, 63.13, 50.74, 61.83, 19.06, 65.7, 77.94, 117.14, 139.37, 52.57, 236.04, 100.56, 242.28, -180.62, 154.02, -8.03], [91.43, 96.45, -118.59, -174.58, -116.49, 80.11, -86.81, 14.62, 79.26, 7.56, 54.99, 260.13, -136.42, -20.77, -77.55, 174.52, 134.41], [33.88, 7.63, 43.54, 70.99, 69.87, 33.97, 273.75, 176.66, 201.94, 336.34, 340.54, 163.77, -156.22, 21.49, -148.41, 94.88, 42.55], [82.28, 177.67, 26.81, 17.66, 47.81, -31.18, 353.23, 589.11, 553.27, 242.35, 444.12, 186.02, 140.73, 75.2, -87.98, -18.23, 166.74], [113.09, -37.01, 134.23, 71.89, 107.88, 198.69, 273.88, 626.63, 551.8, 547.61, 580.35, 337.8, 139.8, 157.64, -1.67, -26.99, 37.35], [106.47, 31.97, 84.99, -125.79, 195.0, 493.65, 861.89, 908.31, 803.9, 781.01, 532.59, 404.67, 115.18, 111.11, 28.08, 122.05, -58.36], [183.62, 45.22, 40.89, 111.58, 425.81, 321.53, 545.09, 866.02, 784.78, 731.35, 609.01, 405.41, -19.65, 71.2, -140.5, 144.07, 25.24], [137.13, -86.95, 15.39, 180.14, 353.23, 699.01, 1033.8, 1014.49, 814.11, 647.68, 461.03, 249.76, 94.8, 41.17, -1.16, 183.76, 188.19], [35.39, 26.92, 198.53, -37.78, 638.93, 624.41, 816.04, 867.28, 697.0, 491.56, 378.21, -18.46, -65.76, 98.1, 12.41, -102.18, 119.05], [190.73, 125.82, 311.45, 369.34, 554.39, 454.37, 755.7, 736.61, 542.43, 188.24, 214.86, 217.91, 7.91, 27.46, -172.14, -82.36, -80.31], [-55.39, 80.18, 267.19, 274.2, 169.53, 327.04, 488.15, 437.53, 225.38, 220.94, 4.01, -92.07, 39.68, 57.22, 144.66, 100.06, 34.96], [130.47, -4.23, 46.3, 101.49, 115.01, 217.38, 249.83, 115.9, 87.36, 105.81, -47.86, -9.94, -82.28, 144.45, 83.44, 23.49, 183.9], [-110.38, -115.98, 245.46, 103.51, 255.43, 163.47, 56.52, 33.82, -33.26, -111.29, 88.08, 193.2, -100.68, 15.44, 86.32, -26.44, -194.1], [109.36, 96.01, -124.89, -16.4, 84.37, 114.87, -65.65, -58.52, -23.22, 42.61, 144.91, -209.84, 110.29, 66.37, -117.85, -147.73, -122.51], [10.94, 45.98, 118.12, -46.53, -72.14, -74.22, 21.22, 0.39, 86.03, 23.97, -45.42, 12.05, -168.61, 27.79, 61.81, 84.07, 28.79], [46.61, -104.11, 56.71, -90.85, -16.51, -66.45, -141.34, 0.96, 58.08, 285.29, -61.41, -9.01, -323.38, 58.35, 80.14, -101.22, 145.65]] g_init = models.Gaussian2D(x_mean=8, y_mean=8) fitter = fitting.LevMarLSQFitter() y, x = np.mgrid[:17, :17] g_fit = fitter(g_init, x, y, test) # Compare with @ysBach original result: # - x_stddev was negative, so its abs value is used for comparison here. # - theta is beyond (-90, 90) deg, which doesn't make sense, so ignored. 
    utils.assert_allclose([g_fit.amplitude.value, g_fit.y_stddev.value],
                          [984.7694929790363, 3.1840618351417307], rtol=1.5e-6)
    utils.assert_allclose(g_fit.x_mean.value, 7.198391516587464)
    utils.assert_allclose(g_fit.y_mean.value, 7.49720660088511, rtol=5e-7)
    utils.assert_allclose(g_fit.x_stddev.value, 1.9840185107597297, rtol=2e-6)


# Issue #6403
@pytest.mark.skipif('not HAS_SCIPY')
def test_2d_model():
    # 2D model with LevMarLSQFitter
    gauss2d = models.Gaussian2D(10.2, 4.3, 5, 2, 1.2, 1.4)
    fitter = fitting.LevMarLSQFitter()
    X = np.linspace(-1, 7)
    Y = np.linspace(-1, 7)
    x, y = np.meshgrid(X, Y)
    z = gauss2d(x, y)
    w = np.ones(x.size)
    w.shape = x.shape

    rsn = RandomState(1234567890)
    n = rsn.randn(x.size)
    n.shape = x.shape

    m = fitter(gauss2d, x, y, z + 2 * n, weights=w)
    utils.assert_allclose(m.parameters, gauss2d.parameters, rtol=1e-1)
    m = fitter(gauss2d, x, y, z + 2 * n, weights=None)
    utils.assert_allclose(m.parameters, gauss2d.parameters, rtol=1e-1)

    # 2D model with LevMarLSQFitter, fixed constraint
    gauss2d.x_stddev.fixed = True
    m = fitter(gauss2d, x, y, z + 2 * n, weights=w)
    utils.assert_allclose(m.parameters, gauss2d.parameters, rtol=1e-1)
    m = fitter(gauss2d, x, y, z + 2 * n, weights=None)
    utils.assert_allclose(m.parameters, gauss2d.parameters, rtol=1e-1)

    # Polynomial2D, col_fit_deriv=False
    p2 = models.Polynomial2D(1, c0_0=1, c1_0=1.2, c0_1=3.2)
    z = p2(x, y)
    m = fitter(p2, x, y, z + 2 * n, weights=None)
    utils.assert_allclose(m.parameters, p2.parameters, rtol=1.5e-1)
    m = fitter(p2, x, y, z + 2 * n, weights=w)
    utils.assert_allclose(m.parameters, p2.parameters, rtol=1.5e-1)

    # Polynomial2D, col_fit_deriv=False, fixed constraint
    p2.c1_0.fixed = True
    m = fitter(p2, x, y, z + 2 * n, weights=w)
    utils.assert_allclose(m.parameters, p2.parameters, rtol=1.5e-1)
    m = fitter(p2, x, y, z + 2 * n, weights=None)
    utils.assert_allclose(m.parameters, p2.parameters, rtol=1.5e-1)
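
# An end-to-end sketch of the constraint semantics this suite checks
# (assumes scipy is available; the toy data are arbitrary): a parameter
# marked fixed is never varied, and a bounded parameter is kept inside its
# interval by LevMarLSQFitter.

import numpy as np
from astropy.modeling import models, fitting

x = np.linspace(0, 10, 200)
y = models.Gaussian1D(3, 5, 1)(x)

init = models.Gaussian1D(amplitude=3, mean=4.2, stddev=1.5,
                         fixed={'amplitude': True},
                         bounds={'mean': (4, 6)})
fit = fitting.LevMarLSQFitter()(init, x, y)
assert fit.amplitude.value == 3       # fixed: untouched by the fit
assert 4 <= fit.mean.value <= 6       # bounded: stays in its interval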
1c5b4e1bf0ffd9754f5fb233c2132dd9a39bac449121a300a584cd3acc271a4f
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest import numpy as np from numpy.testing.utils import assert_allclose, assert_array_equal from ..fitting import LevMarLSQFitter from ..models import Shift, Rotation2D, Gaussian1D, Identity, Mapping from ...utils import NumpyRNGContext try: from scipy import optimize # pylint: disable=W0611 HAS_SCIPY = True except ImportError: HAS_SCIPY = False def test_swap_axes(): x = np.zeros((2, 3)) y = np.ones((2, 3)) mapping = Mapping((1, 0)) assert(mapping(1, 2) == (2.0, 1.0)) assert(mapping.inverse(2, 1) == (1, 2)) assert_array_equal(mapping(x, y), (y, x)) assert_array_equal(mapping.inverse(y, x), (x, y)) def test_duplicate_axes(): mapping = Mapping((0, 1, 0, 1)) assert(mapping(1, 2) == (1.0, 2., 1., 2)) assert(mapping.inverse(1, 2, 1, 2) == (1, 2)) assert(mapping.inverse.n_inputs == 4) assert(mapping.inverse.n_outputs == 2) def test_drop_axes_1(): mapping = Mapping((0,), n_inputs=2) assert(mapping(1, 2) == (1.)) def test_drop_axes_2(): mapping = Mapping((1, )) assert(mapping(1, 2) == (2.)) with pytest.raises(NotImplementedError): mapping.inverse def test_drop_axes_3(): mapping = Mapping((1,), n_inputs=2) assert(mapping.n_inputs == 2) rotation = Rotation2D(60) model = rotation | mapping assert_allclose(model(1, 2), 1.86602540378) def test_identity(): x = np.zeros((2, 3)) y = np.ones((2, 3)) ident1 = Identity(1) shift = Shift(1) rotation = Rotation2D(angle=60) model = ident1 & shift | rotation assert_allclose(model(1, 2), (-2.098076211353316, 2.3660254037844393)) res_x, res_y = model(x, y) assert_allclose((res_x, res_y), (np.array([[-1.73205081, -1.73205081, -1.73205081], [-1.73205081, -1.73205081, -1.73205081]]), np.array([[1., 1., 1.], [1., 1., 1.]]))) assert_allclose(model.inverse(res_x, res_y), (x, y), atol=1.e-10) # https://github.com/astropy/astropy/pull/6018 @pytest.mark.skipif('not HAS_SCIPY') def test_fittable_compound(): m = Identity(1) | Mapping((0, )) | Gaussian1D(1, 5, 4) x = np.arange(10) y_real = m(x) dy = 0.005 with NumpyRNGContext(1234567): n = np.random.normal(0., dy, x.shape) y_noisy = y_real + n pfit = LevMarLSQFitter() new_model = pfit(m, x, y_noisy) y_fit = new_model(x) assert_allclose(y_fit, y_real, atol=dy)
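
# A small sketch of what Mapping does inside compound models (illustration
# only, using the same public API as the tests above): it reorders, drops or
# duplicates inputs so that submodels receive the axes they expect.

from astropy.modeling.models import Mapping, Shift

swap = Mapping((1, 0))              # (x, y) -> (y, x)
dup = Mapping((0, 0))               # x -> (x, x)
model = dup | Shift(1) & Shift(-1)  # one input fans out into two shifted copies
print(model(10))                    # -> (11.0, 9.0)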
c91d273e0fc91e7a107e5350526060370f329dacc624aee87b245ec241a92fd1
# Various tests of models not related to evaluation, fitting, or parameters from ...tests.helper import assert_quantity_allclose from ... import units as u from ..functional_models import Gaussian1D def test_gaussian1d_bounding_box(): g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm, amplitude=3 * u.Jy) bbox = g.bounding_box assert_quantity_allclose(bbox[0], 2.835 * u.m) assert_quantity_allclose(bbox[1], 3.165 * u.m) def test_gaussian1d_n_models(): g = Gaussian1D(amplitude=[1 * u.J, 2. * u.J], mean=[1 * u.m, 5000 * u.AA], stddev=[0.1 * u.m, 100 * u.AA], n_models=2) assert_quantity_allclose(g(1.01 * u.m), [0.99501248, 0.] * u.J) assert_quantity_allclose(g(u.Quantity([1.01 * u.m, 5010 * u.AA])), [0.99501248, 1.990025] * u.J) # FIXME: The following doesn't work as np.asanyarray doesn't work with a # list of quantity objects. # assert_quantity_allclose(g([1.01 * u.m, 5010 * u.AA]), # [ 0.99501248, 1.990025] * u.J)
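
# A short sketch of the quantity-aware behaviour relied on above
# (illustration; the values match the bounding-box test): parameters carry
# units, Quantity arithmetic handles compatible input units, and the
# bounding box comes back as Quantities too.

import astropy.units as u
from astropy.modeling.functional_models import Gaussian1D

g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm, amplitude=3 * u.Jy)
print(g(301 * u.cm))     # input in cm is handled by Quantity arithmetic
print(g.bounding_box)    # (2.835 m, 3.165 m), i.e. mean +/- 5.5 * stddev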
cece9c02979a047d989442012aafc9837ecd201bfc07ebe823a864171a9d9007
from collections import OrderedDict import pytest import numpy as np from ... import units as u from ...tests.helper import assert_quantity_allclose from ..functional_models import (Gaussian1D, Sersic1D, Sine1D, Linear1D, Lorentz1D, Voigt1D, Const1D, Box1D, Trapezoid1D, MexicanHat1D, Moffat1D, Gaussian2D, Const2D, Ellipse2D, Disk2D, Ring2D, Box2D, TrapezoidDisk2D, MexicanHat2D, AiryDisk2D, Moffat2D, Sersic2D) from ..powerlaws import (PowerLaw1D, BrokenPowerLaw1D, SmoothlyBrokenPowerLaw1D, ExponentialCutoffPowerLaw1D, LogParabola1D) from ..polynomial import Polynomial1D, Polynomial2D from ..fitting import LevMarLSQFitter try: from scipy import optimize HAS_SCIPY = True except ImportError: HAS_SCIPY = False FUNC_MODELS_1D = [ {'class': Gaussian1D, 'parameters': {'amplitude': 3 * u.Jy, 'mean': 2 * u.m, 'stddev': 30 * u.cm}, 'evaluation': [(2600 * u.mm, 3 * u.Jy * np.exp(-2))], 'bounding_box': [0.35, 3.65] * u.m}, {'class': Sersic1D, 'parameters': {'amplitude': 3 * u.MJy / u.sr, 'r_eff': 2 * u.arcsec, 'n': 4}, 'evaluation': [(3 * u.arcsec, 1.3237148119468918 * u.MJy/u.sr)], 'bounding_box': False}, {'class': Sine1D, 'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.25 * u.Hz, 'phase': 0.5}, 'evaluation': [(1 * u.s, -3 * u.km / u.s)], 'bounding_box': False}, {'class': Linear1D, 'parameters': {'slope': 3 * u.km / u.s, 'intercept': 5000 * u.m}, 'evaluation': [(6000 * u.ms, 23 * u.km)], 'bounding_box': False}, {'class': Lorentz1D, 'parameters': {'amplitude': 2 * u.Jy, 'x_0': 505 * u.nm, 'fwhm': 100 * u.AA}, 'evaluation': [(0.51 * u.micron, 1 * u.Jy)], 'bounding_box': [255, 755] * u.nm}, {'class': Voigt1D, 'parameters': {'amplitude_L': 2 * u.Jy, 'x_0': 505 * u.nm, 'fwhm_L': 100 * u.AA, 'fwhm_G': 50 * u.AA}, 'evaluation': [(0.51 * u.micron, 1.06264568 * u.Jy)], 'bounding_box': False}, {'class': Const1D, 'parameters': {'amplitude': 3 * u.Jy}, 'evaluation': [(0.6 * u.micron, 3 * u.Jy)], 'bounding_box': False}, {'class': Box1D, 'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'width': 1 * u.um}, 'evaluation': [(4200 * u.nm, 3 * u.Jy), (1 * u.m, 0 * u.Jy)], 'bounding_box': [3.9, 4.9] * u.um}, {'class': Trapezoid1D, 'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'width': 1 * u.um, 'slope': 5 * u.Jy / u.um}, 'evaluation': [(4200 * u.nm, 3 * u.Jy), (1 * u.m, 0 * u.Jy)], 'bounding_box': [3.3, 5.5] * u.um}, {'class': MexicanHat1D, 'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'sigma': 1e-3 * u.mm}, 'evaluation': [(1000 * u.nm, -0.09785050 * u.Jy)], 'bounding_box': [-5.6, 14.4] * u.um}, {'class': Moffat1D, 'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'gamma': 1e-3 * u.mm, 'alpha': 1}, 'evaluation': [(1000 * u.nm, 0.238853503 * u.Jy)], 'bounding_box': False}, ] FUNC_MODELS_2D = [ {'class': Gaussian2D, 'parameters': {'amplitude': 3 * u.Jy, 'x_mean': 2 * u.m, 'y_mean': 1 * u.m, 'x_stddev': 3 * u.m, 'y_stddev': 2 * u.m, 'theta': 45 * u.deg}, 'evaluation': [(412.1320343 * u.cm, 3.121320343 * u.m, 3 * u.Jy * np.exp(-0.5))], 'bounding_box': [[-14.18257445, 16.18257445], [-10.75693665, 14.75693665]] * u.m}, {'class': Const2D, 'parameters': {'amplitude': 3 * u.Jy}, 'evaluation': [(0.6 * u.micron, 0.2 * u.m, 3 * u.Jy)], 'bounding_box': False}, {'class': Disk2D, 'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m, 'R_0': 300 * u.cm}, 'evaluation': [(5.8 * u.m, 201 * u.cm, 3 * u.Jy)], 'bounding_box': [[-1, 5], [0, 6]] * u.m}, {'class': TrapezoidDisk2D, 'parameters': {'amplitude': 3 * u.Jy, 'x_0': 1 * u.m, 'y_0': 2 * u.m, 'R_0': 100 * u.cm, 'slope': 1 * 
u.Jy / u.m}, 'evaluation': [(3.5 * u.m, 2 * u.m, 1.5 * u.Jy)], 'bounding_box': [[-2, 6], [-3, 5]] * u.m}, {'class': Ellipse2D, 'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m, 'a': 300 * u.cm, 'b': 200 * u.cm, 'theta': 45 * u.deg}, 'evaluation': [(4 * u.m, 300 * u.cm, 3 * u.Jy)], 'bounding_box': [[-0.76046808, 4.76046808], [0.68055697, 5.31944302]] * u.m}, {'class': Ring2D, 'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m, 'r_in': 2 * u.cm, 'r_out': 2.1 * u.cm}, 'evaluation': [(302.05 * u.cm, 2 * u.m + 10 * u.um, 3 * u.Jy)], 'bounding_box': [[1.979, 2.021], [2.979, 3.021]] * u.m}, {'class': Box2D, 'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.s, 'x_width': 4 * u.cm, 'y_width': 3 * u.s}, 'evaluation': [(301 * u.cm, 3 * u.s, 3 * u.Jy)], 'bounding_box': [[0.5 * u.s, 3.5 * u.s], [2.98 * u.m, 3.02 * u.m]]}, {'class': MexicanHat2D, 'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m, 'sigma': 1 * u.m}, 'evaluation': [(4 * u.m, 2.5 * u.m, 0.602169107 * u.Jy)], 'bounding_box': False}, {'class': AiryDisk2D, 'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m, 'radius': 1 * u.m}, 'evaluation': [(4 * u.m, 2.1 * u.m, 4.76998480e-05 * u.Jy)], 'bounding_box': False}, {'class': Moffat2D, 'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'y_0': 3.5 * u.um, 'gamma': 1e-3 * u.mm, 'alpha': 1}, 'evaluation': [(1000 * u.nm, 2 * u.um, 0.202565833 * u.Jy)], 'bounding_box': False}, {'class': Sersic2D, 'parameters': {'amplitude': 3 * u.MJy / u.sr, 'x_0': 1 * u.arcsec, 'y_0': 2 * u.arcsec, 'r_eff': 2 * u.arcsec, 'n': 4, 'ellip': 0, 'theta': 0}, 'evaluation': [(3 * u.arcsec, 2.5 * u.arcsec, 2.829990489 * u.MJy/u.sr)], 'bounding_box': False}, ] POWERLAW_MODELS = [ {'class': PowerLaw1D, 'parameters': {'amplitude': 5 * u.kg, 'x_0': 10 * u.cm, 'alpha': 1}, 'evaluation': [(1 * u.m, 500 * u.g)], 'bounding_box': False}, {'class': BrokenPowerLaw1D, 'parameters': {'amplitude': 5 * u.kg, 'x_break': 10 * u.cm, 'alpha_1': 1, 'alpha_2': -1}, 'evaluation': [(1 * u.m, 50 * u.kg), (1 * u.cm, 50 * u.kg)], 'bounding_box': False}, {'class': SmoothlyBrokenPowerLaw1D, 'parameters': {'amplitude': 5 * u.kg, 'x_break': 10 * u.cm, 'alpha_1': 1, 'alpha_2': -1, 'delta': 1}, 'evaluation': [(1 * u.m, 15.125 * u.kg), (1 * u.cm, 15.125 * u.kg)], 'bounding_box': False}, {'class': ExponentialCutoffPowerLaw1D, 'parameters': {'amplitude': 5 * u.kg, 'x_0': 10 * u.cm, 'alpha': 1, 'x_cutoff': 1 * u.m}, 'evaluation': [(1 * u.um, 499999.5 * u.kg), (10 * u.m, 50 * np.exp(-10) * u.g)], 'bounding_box': False}, {'class': LogParabola1D, 'parameters': {'amplitude': 5 * u.kg, 'x_0': 10 * u.cm, 'alpha': 1, 'beta': 2}, 'evaluation': [(1 * u.cm, 5 * 0.1 ** (-1 - 2 * np.log(0.1)) * u.kg)], 'bounding_box': False} ] POLY_MODELS = [ {'class': Polynomial1D, 'parameters': {'degree': 2, 'c0': 3 * u.one, 'c1': 2 / u.m, 'c2': 3 / u.m**2}, 'evaluation': [(3 * u.m, 36 * u.one)], 'bounding_box': False}, {'class': Polynomial1D, 'parameters': {'degree': 2, 'c0': 3 * u.kg, 'c1': 2 * u.kg / u.m, 'c2': 3 * u.kg / u.m**2}, 'evaluation': [(3 * u.m, 36 * u.kg)], 'bounding_box': False}, {'class': Polynomial1D, 'parameters': {'degree': 2, 'c0': 3 * u.kg, 'c1': 2 * u.kg, 'c2': 3 * u.kg}, 'evaluation': [(3 * u.one, 36 * u.kg)], 'bounding_box': False}, {'class': Polynomial2D, 'parameters': {'degree': 2, 'c0_0': 3 * u.one, 'c1_0': 2 / u.m, 'c2_0': 3 / u.m**2, 'c0_1': 3 / u.s, 'c0_2': -2 / u.s**2, 'c1_1': 5 / u.m / u.s}, 'evaluation': [(3 * u.m, 2 * u.s, 64 * u.one)], 'bounding_box': 
     False},
    {'class': Polynomial2D,
     'parameters': {'degree': 2, 'c0_0': 3 * u.kg, 'c1_0': 2 * u.kg / u.m,
                    'c2_0': 3 * u.kg / u.m**2, 'c0_1': 3 * u.kg / u.s,
                    'c0_2': -2 * u.kg / u.s**2, 'c1_1': 5 * u.kg / u.m / u.s},
     'evaluation': [(3 * u.m, 2 * u.s, 64 * u.kg)],
     'bounding_box': False},
    {'class': Polynomial2D,
     'parameters': {'degree': 2, 'c0_0': 3 * u.kg, 'c1_0': 2 * u.kg,
                    'c2_0': 3 * u.kg, 'c0_1': 3 * u.kg, 'c0_2': -2 * u.kg,
                    'c1_1': 5 * u.kg},
     'evaluation': [(3 * u.one, 2 * u.one, 64 * u.kg)],
     'bounding_box': False},
]


MODELS = FUNC_MODELS_1D + FUNC_MODELS_2D + POWERLAW_MODELS

SCIPY_MODELS = set([Sersic1D, Sersic2D, AiryDisk2D])


@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_without_units(model):
    if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
        pytest.skip()
    m = model['class'](**model['parameters'])
    for args in model['evaluation']:
        if len(args) == 2:
            kwargs = OrderedDict(zip(('x', 'y'), args))
        else:
            kwargs = OrderedDict(zip(('x', 'y', 'z'), args))
            if kwargs['x'].unit.is_equivalent(kwargs['y'].unit):
                kwargs['x'] = kwargs['x'].to(kwargs['y'].unit)
        mnu = m.without_units_for_data(**kwargs)
        args = [x.value for x in kwargs.values()]
        assert_quantity_allclose(mnu(*args[:-1]), args[-1])


@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_with_units(model):
    if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
        pytest.skip()
    m = model['class'](**model['parameters'])
    for args in model['evaluation']:
        assert_quantity_allclose(m(*args[:-1]), args[-1])


@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_with_units_x_array(model):

    if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
        pytest.skip()

    m = model['class'](**model['parameters'])

    for args in model['evaluation']:
        if len(args) == 2:
            x, y = args
            x_arr = u.Quantity([x, x])
            result = m(x_arr)
            assert_quantity_allclose(result, u.Quantity([y, y]))
        else:
            x, y, z = args
            x_arr = u.Quantity([x, x])
            y_arr = u.Quantity([y, y])
            result = m(x_arr, y_arr)
            assert_quantity_allclose(result, u.Quantity([z, z]))


@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_with_units_param_array(model):

    if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
        pytest.skip()

    params = {}
    for key, value in model['parameters'].items():
        if value is None or key == 'degree':
            params[key] = value
        else:
            params[key] = np.repeat(value, 2)

    params['n_models'] = 2

    m = model['class'](**params)

    for args in model['evaluation']:
        if len(args) == 2:
            x, y = args
            x_arr = u.Quantity([x, x])
            result = m(x_arr)
            assert_quantity_allclose(result, u.Quantity([y, y]))
        else:
            x, y, z = args
            x_arr = u.Quantity([x, x])
            y_arr = u.Quantity([y, y])
            result = m(x_arr, y_arr)
            assert_quantity_allclose(result, u.Quantity([z, z]))


@pytest.mark.parametrize('model', MODELS)
def test_models_bounding_box(model):

    # In some cases, having units in parameters caused bounding_box to break,
    # so this is to ensure that it works correctly.

    if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
        pytest.skip()

    m = model['class'](**model['parameters'])

    # In the following we need to explicitly test that the value is False
    # since Quantities no longer evaluate as True
    if model['bounding_box'] is False:
        # Check that NotImplementedError is raised, so that if bounding_box
        # is implemented we remember to set bounding_box=True in the list of
        # models above
        with pytest.raises(NotImplementedError):
            m.bounding_box
    else:
        # A bounding box may have inhomogeneous units so we need to check the
        # values one by one.
        for i in range(len(model['bounding_box'])):
            bbox = m.bounding_box
            assert_quantity_allclose(bbox[i], model['bounding_box'][i])


@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('model', MODELS)
def test_models_fitting(model):

    m = model['class'](**model['parameters'])
    if len(model['evaluation'][0]) == 2:
        x = np.linspace(1, 3, 100) * model['evaluation'][0][0].unit
        y = np.exp(-x.value ** 2) * model['evaluation'][0][1].unit
        args = [x, y]
    else:
        x = np.linspace(1, 3, 100) * model['evaluation'][0][0].unit
        y = np.linspace(1, 3, 100) * model['evaluation'][0][1].unit
        z = np.exp(-x.value**2 - y.value**2) * model['evaluation'][0][2].unit
        args = [x, y, z]

    # Test that the model fits even if it has units on parameters
    fitter = LevMarLSQFitter()
    m_new = fitter(m, *args)

    # Check that units have been put back correctly
    for param_name in m.param_names:
        par_bef = getattr(m, param_name)
        par_aft = getattr(m_new, param_name)
        if par_bef.unit is None:
            # If the parameter had no unit before the fit, allow it to come
            # back either with no unit or with a radian unit (which can
            # happen for angle-like parameters)
            assert par_aft.unit is None or par_aft.unit is u.rad
        else:
            assert par_aft.unit.is_equivalent(par_bef.unit)
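
# A compact sketch of the units-preserving fit that test_models_fitting
# performs for each entry (assumes scipy; the Gaussian1D choice is
# illustrative): Quantity data can be fitted directly and the parameters
# come back carrying their units.

import numpy as np
import astropy.units as u
from astropy.modeling import models, fitting

x = np.linspace(1, 3, 100) * u.m
y = np.exp(-x.value ** 2) * u.Jy

init = models.Gaussian1D(amplitude=1 * u.Jy, mean=1 * u.m, stddev=1 * u.m)
fit = fitting.LevMarLSQFitter()(init, x, y)
print(fit.amplitude.unit, fit.mean.unit)   # Jy m -- units survive the fit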
78631017985928b18f74ef15f46ff31954a184503bfca744893445a9c3a57588
# Licensed under a 3-clause BSD style license - see LICENSE.rst from ..errors import SAMPHubError, SAMPClientError, SAMPProxyError # By default, tests should not use the internet. from .. import conf def setup_module(module): conf.use_internet = False def test_SAMPHubError(): """Test that SAMPHubError can be instantiated""" SAMPHubError("test") def test_SAMPClientError(): """Test that SAMPClientError can be instantiated""" SAMPClientError("test") def test_SAMPProxyError(): """Test that SAMPProxyError can be instantiated""" SAMPProxyError("test", "any")
2dbca54c82243cbc2424ed108ac5bdf7af595fe83a14d27ea94877711f4ab29e
import ssl import tempfile import pytest from ...utils.data import get_pkg_data_filename from ..hub import SAMPHubServer from ..integrated_client import SAMPIntegratedClient from ..errors import SAMPProxyError # By default, tests should not use the internet. from .. import conf from .test_helpers import random_params, Receiver, assert_output, TEST_REPLY def setup_module(module): conf.use_internet = False class TestStandardProfile: @property def hub_init_kwargs(self): return {} @property def client_init_kwargs(self): return {} @property def client_connect_kwargs(self): return {} def setup_method(self, method): self.tmpdir = tempfile.mkdtemp() self.hub = SAMPHubServer(web_profile=False, mode='multiple', pool_size=1, **self.hub_init_kwargs) self.hub.start() self.client1 = SAMPIntegratedClient(**self.client_init_kwargs) self.client1.connect(hub=self.hub, pool_size=1, **self.client_connect_kwargs) self.client2 = SAMPIntegratedClient(**self.client_init_kwargs) self.client2.connect(hub=self.hub, pool_size=1, **self.client_connect_kwargs) def teardown_method(self, method): if self.client1.is_connected: self.client1.disconnect() if self.client2.is_connected: self.client2.disconnect() self.hub.stop() def test_main(self): self.client1_id = self.client1.get_public_id() self.client2_id = self.client2.get_public_id() self.metadata1 = {"samp.name": "Client 1", "samp.description.text": "Client 1 Description", "client.version": "1.1"} self.metadata2 = {"samp.name": "Client 2", "samp.description.text": "Client 2 Description", "client.version": "1.2"} # Check that the clients are connected assert self.client1.is_connected assert self.client2.is_connected # Check that ping works self.client1.ping() self.client2.ping() # Check that get_registered_clients works as expected. assert self.client1_id not in self.client1.get_registered_clients() assert self.client2_id in self.client1.get_registered_clients() assert self.client1_id in self.client2.get_registered_clients() assert self.client2_id not in self.client2.get_registered_clients() # Check that get_metadata works as expected assert self.client1.get_metadata(self.client1_id) == {} assert self.client1.get_metadata(self.client2_id) == {} assert self.client2.get_metadata(self.client1_id) == {} assert self.client2.get_metadata(self.client2_id) == {} self.client1.declare_metadata(self.metadata1) assert self.client1.get_metadata(self.client1_id) == self.metadata1 assert self.client2.get_metadata(self.client1_id) == self.metadata1 assert self.client1.get_metadata(self.client2_id) == {} assert self.client2.get_metadata(self.client2_id) == {} self.client2.declare_metadata(self.metadata2) assert self.client1.get_metadata(self.client1_id) == self.metadata1 assert self.client2.get_metadata(self.client1_id) == self.metadata1 assert self.client1.get_metadata(self.client2_id) == self.metadata2 assert self.client2.get_metadata(self.client2_id) == self.metadata2 # Check that, without subscriptions, sending a notification from one # client to another raises an error. 
message = {} message['samp.mtype'] = "table.load.votable" message['samp.params'] = {} with pytest.raises(SAMPProxyError): self.client1.notify(self.client2_id, message) # Check that there are no currently active subscriptions assert self.client1.get_subscribed_clients('table.load.votable') == {} assert self.client2.get_subscribed_clients('table.load.votable') == {} # We now test notifications and calls rec1 = Receiver(self.client1) rec2 = Receiver(self.client2) self.client2.bind_receive_notification('table.load.votable', rec2.receive_notification) self.client2.bind_receive_call('table.load.votable', rec2.receive_call) self.client1.bind_receive_response('test-tag', rec1.receive_response) # Check resulting subscriptions assert self.client1.get_subscribed_clients('table.load.votable') == {self.client2_id: {}} assert self.client2.get_subscribed_clients('table.load.votable') == {} assert 'table.load.votable' in self.client1.get_subscriptions(self.client2_id) assert 'table.load.votable' in self.client2.get_subscriptions(self.client2_id) # Once we have finished with the calls and notifications, we will # check the data got across correctly. # Test notify params = random_params(self.tmpdir) self.client1.notify(self.client2.get_public_id(), {'samp.mtype': 'table.load.votable', 'samp.params': params}) assert_output('table.load.votable', self.client2.get_private_key(), self.client1_id, params, timeout=60) params = random_params(self.tmpdir) self.client1.enotify(self.client2.get_public_id(), "table.load.votable", **params) assert_output('table.load.votable', self.client2.get_private_key(), self.client1_id, params, timeout=60) # Test notify_all params = random_params(self.tmpdir) self.client1.notify_all({'samp.mtype': 'table.load.votable', 'samp.params': params}) assert_output('table.load.votable', self.client2.get_private_key(), self.client1_id, params, timeout=60) params = random_params(self.tmpdir) self.client1.enotify_all("table.load.votable", **params) assert_output('table.load.votable', self.client2.get_private_key(), self.client1_id, params, timeout=60) # Test call params = random_params(self.tmpdir) self.client1.call(self.client2.get_public_id(), 'test-tag', {'samp.mtype': 'table.load.votable', 'samp.params': params}) assert_output('table.load.votable', self.client2.get_private_key(), self.client1_id, params, timeout=60) params = random_params(self.tmpdir) self.client1.ecall(self.client2.get_public_id(), 'test-tag', "table.load.votable", **params) assert_output('table.load.votable', self.client2.get_private_key(), self.client1_id, params, timeout=60) # Test call_all params = random_params(self.tmpdir) self.client1.call_all('tag1', {'samp.mtype': 'table.load.votable', 'samp.params': params}) assert_output('table.load.votable', self.client2.get_private_key(), self.client1_id, params, timeout=60) params = random_params(self.tmpdir) self.client1.ecall_all('tag2', "table.load.votable", **params) assert_output('table.load.votable', self.client2.get_private_key(), self.client1_id, params, timeout=60) # Test call_and_wait params = random_params(self.tmpdir) result = self.client1.call_and_wait(self.client2.get_public_id(), {'samp.mtype': 'table.load.votable', 'samp.params': params}, timeout=5) assert result == TEST_REPLY assert_output('table.load.votable', self.client2.get_private_key(), self.client1_id, params, timeout=60) params = random_params(self.tmpdir) result = self.client1.ecall_and_wait(self.client2.get_public_id(), "table.load.votable", timeout=5, **params) assert result == TEST_REPLY 
assert_output('table.load.votable', self.client2.get_private_key(), self.client1_id, params, timeout=60) # TODO: check that receive_response received the right data
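Distilled to a single notification round-trip, the messaging pattern exercised by ``test_main`` looks like this (a sketch, assuming the test helpers are importable as ``astropy.samp.tests.test_helpers``; every call mirrors one used above)::

    import tempfile

    from astropy.samp import SAMPHubServer, SAMPIntegratedClient
    from astropy.samp.tests.test_helpers import (random_params, Receiver,
                                                 assert_output)

    # Private hub plus two clients, as in setup_method above.
    hub = SAMPHubServer(web_profile=False, mode='multiple', pool_size=1)
    hub.start()
    sender = SAMPIntegratedClient()
    sender.connect(hub=hub, pool_size=1)
    receiver_client = SAMPIntegratedClient()
    receiver_client.connect(hub=hub, pool_size=1)

    # Subscribe the receiver to an mtype, then notify it.
    rec = Receiver(receiver_client)
    receiver_client.bind_receive_notification('table.load.votable',
                                              rec.receive_notification)
    params = random_params(tempfile.mkdtemp())
    sender.notify(receiver_client.get_public_id(),
                  {'samp.mtype': 'table.load.votable',
                   'samp.params': params})

    # The receiver wrote what it got to a pickle file; compare.
    assert_output('table.load.votable', receiver_client.get_private_key(),
                  sender.get_public_id(), params, timeout=60)

    sender.disconnect()
    receiver_client.disconnect()
    hub.stop()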
f13089c16b3f84e04832093203978dc86736d8ce60c36ae47b3ce972d802f6a2
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest from ..hub_proxy import SAMPHubProxy from ..client import SAMPClient from ..integrated_client import SAMPIntegratedClient from ..hub import SAMPHubServer # By default, tests should not use the internet. from .. import conf def setup_module(module): conf.use_internet = False def test_SAMPHubProxy(): """Test that SAMPHubProxy can be instantiated""" SAMPHubProxy() def test_SAMPClient(): """Test that SAMPClient can be instantiated""" proxy = SAMPHubProxy() SAMPClient(proxy) def test_SAMPIntegratedClient(): """Test that SAMPIntegratedClient can be instantiated""" SAMPIntegratedClient() @pytest.fixture def samp_hub(request): """A fixture that can be used by client tests that require a HUB.""" my_hub = SAMPHubServer() my_hub.start() request.addfinalizer(my_hub.stop) def test_reconnect(samp_hub): """Test that SAMPIntegratedClient can reconnect. This is a regression test for bug [#2673] https://github.com/astropy/astropy/issues/2673 """ my_client = SAMPIntegratedClient() my_client.connect() my_client.disconnect() my_client.connect()
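The ``samp_hub`` fixture uses ``request.addfinalizer`` so the hub is stopped even when a test fails; the same guarantee can be written as a ``yield`` fixture, which is standard pytest behavior and reads a little more directly::

    @pytest.fixture
    def samp_hub():
        """Equivalent to the addfinalizer-based fixture above."""
        my_hub = SAMPHubServer()
        my_hub.start()
        yield my_hub   # the test runs here
        my_hub.stop()  # teardown runs even if the test failed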
a87257f13e836991385def9972696a6ad4fcc6c76fdd4ebc428317adf6fd9a74
import sys from ..hub_script import hub_script from .. import conf def setup_module(module): conf.use_internet = False def setup_function(function): function.sys_argv_orig = sys.argv sys.argv = ["samp_hub"] def teardown_function(function): sys.argv = function.sys_argv_orig def test_hub_script(): sys.argv.append('-m') # run in multiple mode sys.argv.append('-w') # disable web profile hub_script(timeout=3)
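The ``setup_function``/``teardown_function`` pair above saves and restores ``sys.argv`` by hand; pytest's ``monkeypatch`` fixture gives the same isolation with automatic cleanup (a standard pytest idiom, shown here as an alternative sketch)::

    from ..hub_script import hub_script


    def test_hub_script(monkeypatch):
        # monkeypatch restores sys.argv automatically after the test.
        monkeypatch.setattr('sys.argv', ['samp_hub', '-m', '-w'])
        hub_script(timeout=3)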
e941b05be4c22b0aa2cdbe1da8722d30b81993236dce4a99474f83b20ebdf01c
from ..hub_proxy import SAMPHubProxy from ..hub import SAMPHubServer from .. import conf def setup_module(module): conf.use_internet = False class TestHubProxy: def setup_method(self, method): self.hub = SAMPHubServer(web_profile=False, mode='multiple', pool_size=1) self.hub.start() self.proxy = SAMPHubProxy() self.proxy.connect(hub=self.hub, pool_size=1) def teardown_method(self, method): if self.proxy.is_connected: self.proxy.disconnect() self.hub.stop() def test_is_connected(self): assert self.proxy.is_connected def test_disconnect(self): self.proxy.disconnect() def test_ping(self): self.proxy.ping() def test_registration(self): result = self.proxy.register(self.proxy.lockfile["samp.secret"]) self.proxy.unregister(result['samp.private-key']) def test_custom_lockfile(tmpdir): lockfile = tmpdir.join('.samptest').realpath().strpath hub = SAMPHubServer(web_profile=False, lockfile=lockfile, pool_size=1) hub.start() proxy = SAMPHubProxy() proxy.connect(hub=hub, pool_size=1) hub.stop()
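``test_registration`` above is the low-level handshake in miniature: the client presents the shared secret from the hub lockfile and receives a private key that identifies it from then on. Spelled out with the same API::

    hub = SAMPHubServer(web_profile=False, mode='multiple', pool_size=1)
    hub.start()

    proxy = SAMPHubProxy()
    proxy.connect(hub=hub, pool_size=1)

    # The lockfile carries the secret required to register.
    result = proxy.register(proxy.lockfile["samp.secret"])

    # The returned private key must be presented to unregister.
    proxy.unregister(result['samp.private-key'])

    proxy.disconnect()
    hub.stop()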
af73cc9f4fa7517ef1b54d26d13bc3a47b6eaf6858be825cdb3e9712a970b008
""" Test the web profile using Python classes that have been adapted to act like a web client. We can only put a single test here because only one hub can run with the web profile active, and the user might want to run the tests in parallel. """ import os import threading import tempfile from urllib.request import Request, urlopen from ...utils.data import get_readable_fileobj from .. import SAMPIntegratedClient, SAMPHubServer from .web_profile_test_helpers import (AlwaysApproveWebProfileDialog, SAMPIntegratedWebClient) from ..web_profile import CROSS_DOMAIN, CLIENT_ACCESS_POLICY from .. import conf from .test_standard_profile import TestStandardProfile as BaseTestStandardProfile def setup_module(module): conf.use_internet = False class TestWebProfile(BaseTestStandardProfile): def setup_method(self, method): self.dialog = AlwaysApproveWebProfileDialog() t = threading.Thread(target=self.dialog.poll) t.start() self.tmpdir = tempfile.mkdtemp() lockfile = os.path.join(self.tmpdir, '.samp') self.hub = SAMPHubServer(web_profile_dialog=self.dialog, lockfile=lockfile, web_port=0, pool_size=1) self.hub.start() self.client1 = SAMPIntegratedClient() self.client1.connect(hub=self.hub, pool_size=1) self.client1_id = self.client1.get_public_id() self.client1_key = self.client1.get_private_key() self.client2 = SAMPIntegratedWebClient() self.client2.connect(web_port=self.hub._web_port, pool_size=2) self.client2_id = self.client2.get_public_id() self.client2_key = self.client2.get_private_key() def teardown_method(self, method): if self.client1.is_connected: self.client1.disconnect() if self.client2.is_connected: self.client2.disconnect() self.hub.stop() self.dialog.stop() # The full communication tests are run since TestWebProfile inherits # test_main from TestStandardProfile def test_web_profile(self): # Check some additional queries to the server with get_readable_fileobj('http://localhost:{0}/crossdomain.xml'.format(self.hub._web_port)) as f: assert f.read() == CROSS_DOMAIN with get_readable_fileobj('http://localhost:{0}/clientaccesspolicy.xml'.format(self.hub._web_port)) as f: assert f.read() == CLIENT_ACCESS_POLICY # Check headers req = Request('http://localhost:{0}/crossdomain.xml'.format(self.hub._web_port)) req.add_header('Origin', 'test_web_profile') resp = urlopen(req) assert resp.getheader('Access-Control-Allow-Origin') == 'test_web_profile' assert resp.getheader('Access-Control-Allow-Headers') == 'Content-Type' assert resp.getheader('Access-Control-Allow-Credentials') == 'true'
d71d9fdbc20071c880b4510bb9b437b2c85329b6a4e75a7e3ae2aa9c2a72a9bc
import os import time import pickle import random import string from .. import SAMP_STATUS_OK TEST_REPLY = {"samp.status": SAMP_STATUS_OK, "samp.result": {"txt": "test"}} def write_output(mtype, private_key, sender_id, params): filename = params['verification_file'] f = open(filename, 'wb') pickle.dump(mtype, f) pickle.dump(private_key, f) pickle.dump(sender_id, f) pickle.dump(params, f) f.close() def assert_output(mtype, private_key, sender_id, params, timeout=None): filename = params['verification_file'] start = time.time() while True: try: with open(filename, 'rb') as f: rec_mtype = pickle.load(f) rec_private_key = pickle.load(f) rec_sender_id = pickle.load(f) rec_params = pickle.load(f) break except (OSError, EOFError): if timeout is not None and time.time() - start > timeout: raise Exception("Timeout while waiting for file: {0}".format(filename)) assert rec_mtype == mtype assert rec_private_key == private_key assert rec_sender_id == sender_id assert rec_params == params class Receiver: def __init__(self, client): self.client = client def receive_notification(self, private_key, sender_id, mtype, params, extra): write_output(mtype, private_key, sender_id, params) def receive_call(self, private_key, sender_id, msg_id, mtype, params, extra): # Here we need to make sure that we first reply, *then* write out the # file, otherwise the tests see the file and move to the next call # before waiting for the reply to be received. self.client.reply(msg_id, TEST_REPLY) self.receive_notification(private_key, sender_id, mtype, params, extra) def receive_response(self, private_key, sender_id, msg_id, response): pass def random_id(length=16): return ''.join(random.sample(string.ascii_letters + string.digits, length)) def random_params(directory): return {'verification_file': os.path.join(directory, random_id()), 'parameter1': 'abcde', 'parameter2': 1331}
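The helpers rendezvous through a pickle file whose path travels inside the message parameters: the receiver writes what it got, and ``assert_output`` polls the file and compares. A self-contained round-trip using only the functions above::

    import tempfile

    mtype = 'table.load.votable'
    params = random_params(tempfile.mkdtemp())

    # Simulate the receiving side writing out what it received...
    write_output(mtype, 'private-key', 'sender-id', params)

    # ...then verify, exactly as the tests do after each notify/call.
    assert_output(mtype, 'private-key', 'sender-id', params, timeout=5)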
0cc7dfa00c37ff0bf92c43fdbf866a5930971112cae82a916ab1bd9366339a79
import time import threading import xmlrpc.client as xmlrpc from ..hub import WebProfileDialog from ..hub_proxy import SAMPHubProxy from ..client import SAMPClient from ..integrated_client import SAMPIntegratedClient from ..utils import ServerProxyPool from ..errors import SAMPClientError, SAMPHubError class AlwaysApproveWebProfileDialog(WebProfileDialog): def __init__(self): self.polling = True WebProfileDialog.__init__(self) def show_dialog(self, *args): self.consent() def poll(self): while self.polling: self.handle_queue() time.sleep(0.1) def stop(self): self.polling = False class SAMPWebHubProxy(SAMPHubProxy): """ Proxy class to simplify the client interaction with a SAMP hub (via the web profile). In practice web clients should run from the browser, so this is provided as a means of testing a hub's support for the web profile from Python. """ def connect(self, pool_size=20, web_port=21012): """ Connect to the current SAMP Hub on localhost:web_port Parameters ---------- pool_size : int, optional The number of socket connections opened to communicate with the Hub. """ self._connected = False try: self.proxy = ServerProxyPool(pool_size, xmlrpc.ServerProxy, 'http://127.0.0.1:{0}'.format(web_port), allow_none=1) self.ping() self._connected = True except xmlrpc.ProtocolError as p: raise SAMPHubError("Protocol Error {}: {}".format(p.errcode, p.errmsg)) @property def _samp_hub(self): """ Property to abstract away the path to the hub, which allows this class to be used for both the standard and the web profile. """ return self.proxy.samp.webhub def set_xmlrpc_callback(self, private_key, xmlrpc_addr): raise NotImplementedError("set_xmlrpc_callback is not defined for the " "web profile") def register(self, identity_info): """ Proxy to ``register`` SAMP Hub method. """ return self._samp_hub.register(identity_info) def allow_reverse_callbacks(self, private_key, allow): """ Proxy to ``allowReverseCallbacks`` SAMP Hub method. """ return self._samp_hub.allowReverseCallbacks(private_key, allow) def pull_callbacks(self, private_key, timeout): """ Proxy to ``pullCallbacks`` SAMP Hub method. """ return self._samp_hub.pullCallbacks(private_key, timeout) class SAMPWebClient(SAMPClient): """ Utility class which provides facilities to create and manage a SAMP compliant XML-RPC server that acts as SAMP callable web client application. In practice web clients should run from the browser, so this is provided as a means of testing a hub's support for the web profile from Python. Parameters ---------- hub : :class:`~astropy.samp.hub_proxy.SAMPWebHubProxy` An instance of :class:`~astropy.samp.hub_proxy.SAMPWebHubProxy` to be used for messaging with the SAMP Hub. name : str, optional Client name (corresponding to ``samp.name`` metadata keyword). description : str, optional Client description (corresponding to ``samp.description.text`` metadata keyword). metadata : dict, optional Client application metadata in the standard SAMP format. callable : bool, optional Whether the client can receive calls and notifications. If set to `False`, then the client can send notifications and calls, but can not receive any. 
""" def __init__(self, hub, name=None, description=None, metadata=None, callable=True): # GENERAL self._is_running = False self._is_registered = False if metadata is None: metadata = {} if name is not None: metadata["samp.name"] = name if description is not None: metadata["samp.description.text"] = description self._metadata = metadata self._callable = callable # HUB INTERACTION self.client = None self._public_id = None self._private_key = None self._hub_id = None self._notification_bindings = {} self._call_bindings = {"samp.app.ping": [self._ping, {}], "client.env.get": [self._client_env_get, {}]} self._response_bindings = {} self.hub = hub if self._callable: self._thread = threading.Thread(target=self._serve_forever) self._thread.daemon = True def _serve_forever(self): while self.is_running: # Watch for callbacks here if self._is_registered: results = self.hub.pull_callbacks(self.get_private_key(), 0) for result in results: if result['samp.methodName'] == 'receiveNotification': self.receive_notification(self._private_key, *result['samp.params']) elif result['samp.methodName'] == 'receiveCall': self.receive_call(self._private_key, *result['samp.params']) elif result['samp.methodName'] == 'receiveResponse': self.receive_response(self._private_key, *result['samp.params']) self.hub.server_close() def register(self): """ Register the client to the SAMP Hub. """ if self.hub.is_connected: if self._private_key is not None: raise SAMPClientError("Client already registered") result = self.hub.register("Astropy SAMP Web Client") if result["samp.self-id"] == "": raise SAMPClientError("Registation failed - samp.self-id " "was not set by the hub.") if result["samp.private-key"] == "": raise SAMPClientError("Registation failed - samp.private-key " "was not set by the hub.") self._public_id = result["samp.self-id"] self._private_key = result["samp.private-key"] self._hub_id = result["samp.hub-id"] if self._callable: self._declare_subscriptions() self.hub.allow_reverse_callbacks(self._private_key, True) if self._metadata != {}: self.declare_metadata() self._is_registered = True else: raise SAMPClientError("Unable to register to the SAMP Hub. Hub " "proxy not connected.") class SAMPIntegratedWebClient(SAMPIntegratedClient): """ A Simple SAMP web client. In practice web clients should run from the browser, so this is provided as a means of testing a hub's support for the web profile from Python. This class is meant to simplify the client usage providing a proxy class that merges the :class:`~astropy.samp.client.SAMPWebClient` and :class:`~astropy.samp.hub_proxy.SAMPWebHubProxy` functionalities in a simplified API. Parameters ---------- name : str, optional Client name (corresponding to ``samp.name`` metadata keyword). description : str, optional Client description (corresponding to ``samp.description.text`` metadata keyword). metadata : dict, optional Client application metadata in the standard SAMP format. callable : bool, optional Whether the client can receive calls and notifications. If set to `False`, then the client can send notifications and calls, but can not receive any. """ def __init__(self, name=None, description=None, metadata=None, callable=True): self.hub = SAMPWebHubProxy() self.client = SAMPWebClient(self.hub, name, description, metadata, callable) def connect(self, pool_size=20, web_port=21012): """ Connect with the current or specified SAMP Hub, start and register the client. 
Parameters ---------- pool_size : int, optional The number of socket connections opened to communicate with the Hub. """ self.hub.connect(pool_size, web_port=web_port) self.client.start() self.client.register()
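Putting these helpers together, a web-profile client can be exercised against a live hub like this (a sketch mirroring ``test_web_profile.py``; ``SAMPHubServer`` comes from ``astropy.samp``, and ``_web_port`` is the same private attribute that test relies on)::

    import threading

    from astropy.samp import SAMPHubServer

    dialog = AlwaysApproveWebProfileDialog()
    threading.Thread(target=dialog.poll).start()

    # web_port=0 lets the OS pick a free port for the web profile.
    hub = SAMPHubServer(web_profile_dialog=dialog, web_port=0, pool_size=1)
    hub.start()

    client = SAMPIntegratedWebClient()
    client.connect(web_port=hub._web_port, pool_size=2)
    client.ping()

    client.disconnect()
    hub.stop()
    dialog.stop()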
3fca71fac501cb184272df3bdc716a0967118cfab529c7d98bd05f3b8a3747f8
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import time

from ..hub import SAMPHubServer

from .. import conf


def setup_module(module):
    conf.use_internet = False


def test_SAMPHubServer():
    """Test that SAMPHubServer can be instantiated"""
    SAMPHubServer(web_profile=False, mode='multiple', pool_size=1)


def test_SAMPHubServer_run():
    """Test that SAMPHubServer can be run"""
    hub = SAMPHubServer(web_profile=False, mode='multiple', pool_size=1)
    hub.start()
    time.sleep(1)
    hub.stop()


def test_SAMPHubServer_run_repeated():
    """
    Test that SAMPHubServer can be restarted after it has been stopped,
    including when web profile support is enabled.
    """
    hub = SAMPHubServer(web_profile=True, mode='multiple', pool_size=1)
    hub.start()
    time.sleep(1)
    hub.stop()
    time.sleep(1)
    hub.start()
    time.sleep(1)
    hub.stop()
c92f32de255b98686b22a94bd627098143818c940b4392826000e60d71d10759
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ The astropy.utils.iers package provides access to the tables provided by the International Earth Rotation and Reference Systems Service, in particular allowing interpolation of published UT1-UTC values for given times. These are used in `astropy.time` to provide UT1 values. The polar motions are also used for determining earth orientation for celestial-to-terrestrial coordinate transformations (in `astropy.coordinates`). """ from warnings import warn try: from urlparse import urlparse except ImportError: from urllib.parse import urlparse import numpy as np from ... import config as _config from ... import units as u from ...table import Table, QTable from ...utils.data import get_pkg_data_filename, clear_download_cache from ... import utils from ...utils.exceptions import AstropyWarning __all__ = ['Conf', 'conf', 'IERS', 'IERS_B', 'IERS_A', 'IERS_Auto', 'FROM_IERS_B', 'FROM_IERS_A', 'FROM_IERS_A_PREDICTION', 'TIME_BEFORE_IERS_RANGE', 'TIME_BEYOND_IERS_RANGE', 'IERS_A_FILE', 'IERS_A_URL', 'IERS_A_README', 'IERS_B_FILE', 'IERS_B_URL', 'IERS_B_README', 'IERSRangeError', 'IERSStaleWarning'] # IERS-A default file name, URL, and ReadMe with content description IERS_A_FILE = 'finals2000A.all' IERS_A_URL = 'http://maia.usno.navy.mil/ser7/finals2000A.all' IERS_A_README = get_pkg_data_filename('data/ReadMe.finals2000A') # IERS-B default file name, URL, and ReadMe with content description IERS_B_FILE = get_pkg_data_filename('data/eopc04_IAU2000.62-now') IERS_B_URL = 'http://hpiers.obspm.fr/iers/eop/eopc04/eopc04_IAU2000.62-now' IERS_B_README = get_pkg_data_filename('data/ReadMe.eopc04_IAU2000') # Status/source values returned by IERS.ut1_utc FROM_IERS_B = 0 FROM_IERS_A = 1 FROM_IERS_A_PREDICTION = 2 TIME_BEFORE_IERS_RANGE = -1 TIME_BEYOND_IERS_RANGE = -2 MJD_ZERO = 2400000.5 INTERPOLATE_ERROR = """\ interpolating from IERS_Auto using predictive values that are more than {} days old. Normally you should not see this error because this class automatically downloads the latest IERS-A table. Perhaps you are offline? If you understand what you are doing then this error can be suppressed by setting the auto_max_age configuration variable to ``None``: from astropy.utils.iers import conf conf.auto_max_age = None """ def download_file(*args, **kwargs): """ Overload astropy.utils.data.download_file within iers module to use a custom (longer) wait time. This just passes through ``*args`` and ``**kwargs`` after temporarily setting the download_file remote timeout to the local ``iers.conf.remote_timeout`` value. """ with utils.data.conf.set_temp('remote_timeout', conf.remote_timeout): return utils.data.download_file(*args, **kwargs) class IERSStaleWarning(AstropyWarning): pass class Conf(_config.ConfigNamespace): """ Configuration parameters for `astropy.utils.iers`. """ auto_download = _config.ConfigItem( True, 'Enable auto-downloading of the latest IERS data. If set to False ' 'then the local IERS-B file will be used by default. Default is True.') auto_max_age = _config.ConfigItem( 30.0, 'Maximum age (days) of predictive data before auto-downloading. 
Default is 30.') iers_auto_url = _config.ConfigItem( IERS_A_URL, 'URL for auto-downloading IERS file data.') remote_timeout = _config.ConfigItem( 10.0, 'Remote timeout downloading IERS file data (seconds).') conf = Conf() class IERSRangeError(IndexError): """ Any error for when dates are outside of the valid range for IERS """ class IERS(QTable): """Generic IERS table class, defining interpolation functions. Sub-classed from `astropy.table.QTable`. The table should hold columns 'MJD', 'UT1_UTC', 'dX_2000A'/'dY_2000A', and 'PM_x'/'PM_y'. """ iers_table = None @classmethod def open(cls, file=None, cache=False, **kwargs): """Open an IERS table, reading it from a file if not loaded before. Parameters ---------- file : str or None full local or network path to the ascii file holding IERS data, for passing on to the ``read`` class methods (further optional arguments that are available for some IERS subclasses can be added). If None, use the default location from the ``read`` class method. cache : bool Whether to use cache. Defaults to False, since IERS files are regularly updated. Returns ------- An IERS table class instance Notes ----- On the first call in a session, the table will be memoized (in the ``iers_table`` class attribute), and further calls to ``open`` will return this stored table if ``file=None`` (the default). If a table needs to be re-read from disk, pass on an explicit file location or use the (sub-class) close method and re-open. If the location is a network location it is first downloaded via download_file. For the IERS class itself, an IERS_B sub-class instance is opened. """ if file is not None or cls.iers_table is None: if file is not None: if urlparse(file).netloc: kwargs.update(file=download_file(file, cache=cache)) else: kwargs.update(file=file) cls.iers_table = cls.read(**kwargs) return cls.iers_table @classmethod def close(cls): """Remove the IERS table from the class. This allows the table to be re-read from disk during one's session (e.g., if one finds it is out of date and has updated the file). """ cls.iers_table = None def mjd_utc(self, jd1, jd2=0.): """Turn a time to MJD, returning integer and fractional parts. Parameters ---------- jd1 : float, array, or Time first part of two-part JD, or Time object jd2 : float or array, optional second part of two-part JD. Default is 0., ignored if jd1 is `~astropy.time.Time`. Returns ------- mjd : float or array integer part of MJD utc : float or array fractional part of MJD """ try: # see if this is a Time object jd1, jd2 = jd1.utc.jd1, jd1.utc.jd2 except Exception: pass mjd = np.floor(jd1 - MJD_ZERO + jd2) utc = jd1 - (MJD_ZERO+mjd) + jd2 return mjd, utc def ut1_utc(self, jd1, jd2=0., return_status=False): """Interpolate UT1-UTC corrections in IERS Table for given dates. Parameters ---------- jd1 : float, float array, or Time object first part of two-part JD, or Time object jd2 : float or float array, optional second part of two-part JD. Default is 0., ignored if jd1 is `~astropy.time.Time`. return_status : bool Whether to return status values. If False (default), raise ``IERSRangeError`` if any time is out of the range covered by the IERS table. 
Returns ------- ut1_utc : float or float array UT1-UTC, interpolated in IERS Table status : int or int array Status values (if ``return_status``=``True``):: ``iers.FROM_IERS_B`` ``iers.FROM_IERS_A`` ``iers.FROM_IERS_A_PREDICTION`` ``iers.TIME_BEFORE_IERS_RANGE`` ``iers.TIME_BEYOND_IERS_RANGE`` """ return self._interpolate(jd1, jd2, ['UT1_UTC'], self.ut1_utc_source if return_status else None) def dcip_xy(self, jd1, jd2=0., return_status=False): """Interpolate CIP corrections in IERS Table for given dates. Parameters ---------- jd1 : float, float array, or Time object first part of two-part JD, or Time object jd2 : float or float array, optional second part of two-part JD (default 0., ignored if jd1 is Time) return_status : bool Whether to return status values. If False (default), raise ``IERSRangeError`` if any time is out of the range covered by the IERS table. Returns ------- D_x : Quantity with angle units x component of CIP correction for the requested times D_y : Quantity with angle units y component of CIP correction for the requested times status : int or int array Status values (if ``return_status``=``True``):: ``iers.FROM_IERS_B`` ``iers.FROM_IERS_A`` ``iers.FROM_IERS_A_PREDICTION`` ``iers.TIME_BEFORE_IERS_RANGE`` ``iers.TIME_BEYOND_IERS_RANGE`` """ return self._interpolate(jd1, jd2, ['dX_2000A', 'dY_2000A'], self.dcip_source if return_status else None) def pm_xy(self, jd1, jd2=0., return_status=False): """Interpolate polar motions from IERS Table for given dates. Parameters ---------- jd1 : float, float array, or Time object first part of two-part JD, or Time object jd2 : float or float array, optional second part of two-part JD. Default is 0., ignored if jd1 is `~astropy.time.Time`. return_status : bool Whether to return status values. If False (default), raise ``IERSRangeError`` if any time is out of the range covered by the IERS table. Returns ------- PM_x : Quantity with angle units x component of polar motion for the requested times PM_y : Quantity with angle units y component of polar motion for the requested times status : int or int array Status values (if ``return_status``=``True``):: ``iers.FROM_IERS_B`` ``iers.FROM_IERS_A`` ``iers.FROM_IERS_A_PREDICTION`` ``iers.TIME_BEFORE_IERS_RANGE`` ``iers.TIME_BEYOND_IERS_RANGE`` """ return self._interpolate(jd1, jd2, ['PM_x', 'PM_y'], self.pm_source if return_status else None) def _check_interpolate_indices(self, indices_orig, indices_clipped, max_input_mjd): """ Check that the indices from interpolation match those after clipping to the valid table range. This method gets overridden in the IERS_Auto class because it has different requirements. 
""" if np.any(indices_orig != indices_clipped): raise IERSRangeError('(some) times are outside of range covered ' 'by IERS table.') def _interpolate(self, jd1, jd2, columns, source=None): mjd, utc = self.mjd_utc(jd1, jd2) # enforce array is_scalar = not hasattr(mjd, '__array__') or mjd.ndim == 0 if is_scalar: mjd = np.array([mjd]) utc = np.array([utc]) self._refresh_table_as_needed(mjd) # For typical format, will always find a match (since MJD are integer) # hence, important to define which side we will be; this ensures # self['MJD'][i-1]<=mjd<self['MJD'][i] i = np.searchsorted(self['MJD'].value, mjd, side='right') # Get index to MJD at or just below given mjd, clipping to ensure we # stay in range of table (status will be set below for those outside) i1 = np.clip(i, 1, len(self) - 1) i0 = i1 - 1 mjd_0, mjd_1 = self['MJD'][i0].value, self['MJD'][i1].value results = [] for column in columns: val_0, val_1 = self[column][i0], self[column][i1] d_val = val_1 - val_0 if column == 'UT1_UTC': # Check & correct for possible leap second (correcting diff., # not 1st point, since jump can only happen right at 2nd point) d_val -= d_val.round() # Linearly interpolate (which is what TEMPO does for UT1-UTC, but # may want to follow IERS gazette #13 for more precise # interpolation and correction for tidal effects; # http://maia.usno.navy.mil/iers-gaz13) val = val_0 + (mjd - mjd_0 + utc) / (mjd_1 - mjd_0) * d_val # Do not extrapolate outside range, instead just propagate last values. val[i == 0] = self[column][0] val[i == len(self)] = self[column][-1] if is_scalar: val = val[0] results.append(val) if source: # Set status to source, using the routine passed in. status = source(i1) # Check for out of range status[i == 0] = TIME_BEFORE_IERS_RANGE status[i == len(self)] = TIME_BEYOND_IERS_RANGE if is_scalar: status = status[0] results.append(status) return results else: self._check_interpolate_indices(i1, i, np.max(mjd)) return results[0] if len(results) == 1 else results def _refresh_table_as_needed(self, mjd): """ Potentially update the IERS table in place depending on the requested time values in ``mdj`` and the time span of the table. The base behavior is not to update the table. ``IERS_Auto`` overrides this method. """ pass def ut1_utc_source(self, i): """Source for UT1-UTC. To be overridden by subclass.""" return np.zeros_like(i) def dcip_source(self, i): """Source for CIP correction. To be overridden by subclass.""" return np.zeros_like(i) def pm_source(self, i): """Source for polar motion. To be overridden by subclass.""" return np.zeros_like(i) @property def time_now(self): """ Property to provide the current time, but also allow for explicitly setting the _time_now attribute for testing purposes. """ from astropy.time import Time try: return self._time_now except Exception: return Time.now() class IERS_A(IERS): """IERS Table class targeted to IERS A, provided by USNO. These include rapid turnaround and predicted times. See http://maia.usno.navy.mil/ Notes ----- The IERS A file is not part of astropy. It can be downloaded from ``iers.IERS_A_URL``. See ``iers.__doc__`` for instructions on how to use it in ``Time``, etc. """ iers_table = None @classmethod def _combine_a_b_columns(cls, iers_a): """ Return a new table with appropriate combination of IERS_A and B columns. """ # IERS A has some rows at the end that hold nothing but dates & MJD # presumably to be filled later. Exclude those a priori -- there # should at least be a predicted UT1-UTC and PM! 
table = iers_a[~iers_a['UT1_UTC_A'].mask & ~iers_a['PolPMFlag_A'].mask] # This does nothing for IERS_A, but allows IERS_Auto to ensure the # IERS B values in the table are consistent with the true ones. table = cls._substitute_iers_b(table) # Run np.where on the data from the table columns, since in numpy 1.9 # it otherwise returns an only partially initialized column. table['UT1_UTC'] = np.where(table['UT1_UTC_B'].mask, table['UT1_UTC_A'].data, table['UT1_UTC_B'].data) # Ensure the unit is correct, for later column conversion to Quantity. table['UT1_UTC'].unit = table['UT1_UTC_A'].unit table['UT1Flag'] = np.where(table['UT1_UTC_B'].mask, table['UT1Flag_A'].data, 'B') # Repeat for polar motions. table['PM_x'] = np.where(table['PM_X_B'].mask, table['PM_x_A'].data, table['PM_X_B'].data) table['PM_x'].unit = table['PM_x_A'].unit table['PM_y'] = np.where(table['PM_Y_B'].mask, table['PM_y_A'].data, table['PM_Y_B'].data) table['PM_y'].unit = table['PM_y_A'].unit table['PolPMFlag'] = np.where(table['PM_X_B'].mask, table['PolPMFlag_A'].data, 'B') table['dX_2000A'] = np.where(table['dX_2000A_B'].mask, table['dX_2000A_A'].data, table['dX_2000A_B'].data) table['dX_2000A'].unit = table['dX_2000A_A'].unit table['dY_2000A'] = np.where(table['dY_2000A_B'].mask, table['dY_2000A_A'].data, table['dY_2000A_B'].data) table['dY_2000A'].unit = table['dY_2000A_A'].unit table['NutFlag'] = np.where(table['dX_2000A_B'].mask, table['NutFlag_A'].data, 'B') # Get the table index for the first row that has predictive values # PolPMFlag_A IERS (I) or Prediction (P) flag for # Bull. A polar motion values # UT1Flag_A IERS (I) or Prediction (P) flag for # Bull. A UT1-UTC values is_predictive = (table['UT1Flag_A'] == 'P') | (table['PolPMFlag_A'] == 'P') table.meta['predictive_index'] = np.min(np.flatnonzero(is_predictive)) table.meta['predictive_mjd'] = table['MJD'][table.meta['predictive_index']] return table @classmethod def _substitute_iers_b(cls, table): # See documentation in IERS_Auto. return table @classmethod def read(cls, file=None, readme=None): """Read IERS-A table from a finals2000a.* file provided by USNO. Parameters ---------- file : str full path to ascii file holding IERS-A data. Defaults to ``iers.IERS_A_FILE``. readme : str full path to ascii file holding CDS-style readme. Defaults to package version, ``iers.IERS_A_README``. Returns ------- ``IERS_A`` class instance """ if file is None: file = IERS_A_FILE if readme is None: readme = IERS_A_README # Read in as a regular Table, including possible masked columns. # Columns will be filled and converted to Quantity in cls.__init__. iers_a = Table.read(file, format='cds', readme=readme) # Combine the A and B data for UT1-UTC and PM columns table = cls._combine_a_b_columns(iers_a) table.meta['data_path'] = file table.meta['readme_path'] = readme # Fill any masked values, and convert to a QTable. 
return cls(table.filled()) def ut1_utc_source(self, i): """Set UT1-UTC source flag for entries in IERS table""" ut1flag = self['UT1Flag'][i] source = np.ones_like(i) * FROM_IERS_B source[ut1flag == 'I'] = FROM_IERS_A source[ut1flag == 'P'] = FROM_IERS_A_PREDICTION return source def dcip_source(self, i): """Set CIP correction source flag for entries in IERS table""" nutflag = self['NutFlag'][i] source = np.ones_like(i) * FROM_IERS_B source[nutflag == 'I'] = FROM_IERS_A source[nutflag == 'P'] = FROM_IERS_A_PREDICTION return source def pm_source(self, i): """Set polar motion source flag for entries in IERS table""" pmflag = self['PolPMFlag'][i] source = np.ones_like(i) * FROM_IERS_B source[pmflag == 'I'] = FROM_IERS_A source[pmflag == 'P'] = FROM_IERS_A_PREDICTION return source class IERS_B(IERS): """IERS Table class targeted to IERS B, provided by IERS itself. These are final values; see http://www.iers.org/ Notes ----- If the package IERS B file (```iers.IERS_B_FILE``) is out of date, a new version can be downloaded from ``iers.IERS_B_URL``. """ iers_table = None @classmethod def read(cls, file=None, readme=None, data_start=14): """Read IERS-B table from a eopc04_iau2000.* file provided by IERS. Parameters ---------- file : str full path to ascii file holding IERS-B data. Defaults to package version, ``iers.IERS_B_FILE``. readme : str full path to ascii file holding CDS-style readme. Defaults to package version, ``iers.IERS_B_README``. data_start : int starting row. Default is 14, appropriate for standard IERS files. Returns ------- ``IERS_B`` class instance """ if file is None: file = IERS_B_FILE if readme is None: readme = IERS_B_README # Read in as a regular Table, including possible masked columns. # Columns will be filled and converted to Quantity in cls.__init__. iers_b = Table.read(file, format='cds', readme=readme, data_start=data_start) return cls(iers_b.filled()) def ut1_utc_source(self, i): """Set UT1-UTC source flag for entries in IERS table""" return np.ones_like(i) * FROM_IERS_B def dcip_source(self, i): """Set CIP correction source flag for entries in IERS table""" return np.ones_like(i) * FROM_IERS_B def pm_source(self, i): """Set PM source flag for entries in IERS table""" return np.ones_like(i) * FROM_IERS_B class IERS_Auto(IERS_A): """ Provide most-recent IERS data and automatically handle downloading of updated values as necessary. """ iers_table = None @classmethod def open(cls): """If the configuration setting ``astropy.utils.iers.conf.auto_download`` is set to True (default), then open a recent version of the IERS-A table with predictions for UT1-UTC and polar motion out to approximately one year from now. If the available version of this file is older than ``astropy.utils.iers.conf.auto_max_age`` days old (or non-existent) then it will be downloaded over the network and cached. If the configuration setting ``astropy.utils.iers.conf.auto_download`` is set to False then ``astropy.utils.iers.IERS()`` is returned. This is normally the IERS-B table that is supplied with astropy. On the first call in a session, the table will be memoized (in the ``iers_table`` class attribute), and further calls to ``open`` will return this stored table. Returns ------- `~astropy.table.QTable` instance with IERS (Earth rotation) data columns """ if not conf.auto_download: cls.iers_table = IERS.open() return cls.iers_table if cls.iers_table is not None: # If the URL has changed, we need to redownload the file, so we # should ignore the internally cached version. 
if cls.iers_table.meta.get('data_url') == conf.iers_auto_url: return cls.iers_table try: filename = download_file(conf.iers_auto_url, cache=True) except Exception as err: # Issue a warning here, perhaps user is offline. An exception # will be raised downstream when actually trying to interpolate # predictive values. warn(AstropyWarning('failed to download {}, using local IERS-B: {}' .format(conf.iers_auto_url, str(err)))) cls.iers_table = IERS.open() return cls.iers_table cls.iers_table = cls.read(file=filename) cls.iers_table.meta['data_url'] = str(conf.iers_auto_url) return cls.iers_table def _check_interpolate_indices(self, indices_orig, indices_clipped, max_input_mjd): """Check that the indices from interpolation match those after clipping to the valid table range. The IERS_Auto class is exempted as long as it has sufficiently recent available data so the clipped interpolation is always within the confidence bounds of current Earth rotation knowledge. """ predictive_mjd = self.meta['predictive_mjd'] # See explanation in _refresh_table_as_needed for these conditions auto_max_age = (conf.auto_max_age if conf.auto_max_age is not None else np.finfo(float).max) if (max_input_mjd > predictive_mjd and self.time_now.mjd - predictive_mjd > auto_max_age): raise ValueError(INTERPOLATE_ERROR) def _refresh_table_as_needed(self, mjd): """Potentially update the IERS table in place depending on the requested time values in ``mjd`` and the time span of the table. For IERS_Auto the behavior is that the table is refreshed from the IERS server if both the following apply: - Any of the requested IERS values are predictive. The IERS-A table contains predictive data out for a year after the available definitive values. - The first predictive values are at least ``conf.auto_max_age days`` old. In other words the IERS-A table was created by IERS long enough ago that it can be considered stale for predictions. """ max_input_mjd = np.max(mjd) now_mjd = self.time_now.mjd # IERS-A table contains predictive data out for a year after # the available definitive values. fpi = self.meta['predictive_index'] predictive_mjd = self.meta['predictive_mjd'] # Update table in place if necessary auto_max_age = (conf.auto_max_age if conf.auto_max_age is not None else np.finfo(float).max) # If auto_max_age is smaller than IERS update time then repeated downloads may # occur without getting updated values (giving a IERSStaleWarning). if auto_max_age < 10: raise ValueError('IERS auto_max_age configuration value must be larger than 10 days') if (max_input_mjd > predictive_mjd and now_mjd - predictive_mjd > auto_max_age): # Get the latest version try: clear_download_cache(conf.iers_auto_url) filename = download_file(conf.iers_auto_url, cache=True) except Exception as err: # Issue a warning here, perhaps user is offline. An exception # will be raised downstream when actually trying to interpolate # predictive values. warn(AstropyWarning('failed to download {}: {}.\nA coordinate or time-related ' 'calculation might be compromised or fail because the dates are ' 'not covered by the available IERS file. See the ' '"IERS data access" section of the astropy documentation ' 'for additional information on working offline.' .format(conf.iers_auto_url, str(err)))) return new_table = self.__class__.read(file=filename) # New table has new values? if new_table['MJD'][-1] > self['MJD'][-1]: # Replace *replace* current values from the first predictive index through # the end of the current table. 
This replacement is much faster than just # deleting all rows and then using add_row for the whole duration. new_fpi = np.searchsorted(new_table['MJD'].value, predictive_mjd, side='right') n_replace = len(self) - fpi self[fpi:] = new_table[new_fpi:new_fpi + n_replace] # Sanity check for continuity if new_table['MJD'][new_fpi + n_replace] - self['MJD'][-1] != 1.0 * u.d: raise ValueError('unexpected gap in MJD when refreshing IERS table') # Now add new rows in place for row in new_table[new_fpi + n_replace:]: self.add_row(row) self.meta.update(new_table.meta) else: warn(IERSStaleWarning( 'IERS_Auto predictive values are older than {} days but downloading ' 'the latest table did not find newer values'.format(conf.auto_max_age))) @classmethod def _substitute_iers_b(cls, table): """Substitute IERS B values with those from a real IERS B table. IERS-A has IERS-B values included, but for reasons unknown these do not match the latest IERS-B values (see comments in #4436). Here, we use the bundled astropy IERS-B table to overwrite the values in the downloaded IERS-A table. """ iers_b = IERS_B.open() # Substitute IERS-B values for existing B values in IERS-A table mjd_b = table['MJD'][~table['UT1_UTC_B'].mask] i0 = np.searchsorted(iers_b['MJD'].value, mjd_b[0], side='left') i1 = np.searchsorted(iers_b['MJD'].value, mjd_b[-1], side='right') iers_b = iers_b[i0:i1] n_iers_b = len(iers_b) # If there is overlap then replace IERS-A values from available IERS-B if n_iers_b > 0: # Sanity check that we are overwriting the correct values if not np.allclose(table['MJD'][:n_iers_b], iers_b['MJD'].value): raise ValueError('unexpected mismatch when copying ' 'IERS-B values into IERS-A table.') # Finally do the overwrite table['UT1_UTC_B'][:n_iers_b] = iers_b['UT1_UTC'].value table['PM_X_B'][:n_iers_b] = iers_b['PM_x'].value table['PM_Y_B'][:n_iers_b] = iers_b['PM_y'].value return table # by default for IERS class, read IERS-B table IERS.read = IERS_B.read
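In day-to-day use the interpolation machinery is driven through a table instance; for example, with the bundled IERS-B table (a sketch; the exact values and covered date range depend on the bundled file)::

    import numpy as np
    from astropy.utils import iers

    iers_b = iers.IERS_B.open()          # bundled, final IERS-B values

    # Two-part JD for 2010-01-01 00:00 UTC (MJD 55197).
    dt = iers_b.ut1_utc(2455197.5, 0.)   # UT1-UTC as a Quantity in seconds

    # With return_status=True, out-of-range dates are flagged instead of
    # raising IERSRangeError.
    dt, status = iers_b.ut1_utc(np.array([2455197.5, 3e6]),
                                return_status=True)
    assert status[-1] == iers.TIME_BEYOND_IERS_RANGE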
013446fdd07bdef6b2d568e41ca62c74cd1e662309d985c43f2d0722358a2591
# Licensed under a 3-clause BSD style license - see LICENSE.rst

"""
A collection of functions for checking various XML-related strings for
standards compliance.
"""

import re
import urllib.parse


def check_id(ID):
    """
    Returns `True` if *ID* is a valid XML ID.
    """
    return re.match(r"^[A-Za-z_][A-Za-z0-9_\.\-]*$", ID) is not None


def fix_id(ID):
    """
    Given an arbitrary string, create one that can be used as an xml id.

    This is rather simplistic at the moment, since it just replaces
    non-valid characters with underscores.
    """
    if re.match(r"^[A-Za-z_][A-Za-z0-9_\.\-]*$", ID):
        return ID
    if len(ID):
        corrected = ID
        if not len(corrected) or re.match('^[^A-Za-z_]$', corrected[0]):
            corrected = '_' + corrected
        corrected = (re.sub(r"[^A-Za-z_]", '_', corrected[0]) +
                     re.sub(r"[^A-Za-z0-9_\.\-]", "_", corrected[1:]))
        return corrected
    return ''


# Currently unused; the character class is whitespace (``\r``, ``\n``,
# ``\t`` and space), matching ``check_token`` below.
_token_regex = r"(?![\r\n\t ])[^\r\n\t]*(?![\r\n\t ])"


def check_token(token):
    """
    Returns `True` if *token* is a valid XML token, as defined by XML
    Schema Part 2.
    """
    return (token == '' or
            re.match(
                r"[^\r\n\t ]?([^\r\n\t ]| [^\r\n\t ])*[^\r\n\t ]?$",
                token) is not None)


def check_mime_content_type(content_type):
    """
    Returns `True` if *content_type* is a valid MIME content type
    (syntactically at least), as defined by RFC 2045.
    """
    ctrls = ''.join(chr(x) for x in range(0, 0x20))
    token_regex = '[^()<>@,;:\\\"/[\\]?= {}\x7f]+'.format(ctrls)
    return re.match(
        r'(?P<type>{})/(?P<subtype>{})$'.format(token_regex, token_regex),
        content_type) is not None


def check_anyuri(uri):
    """
    Returns `True` if *uri* is a valid URI as defined in RFC 2396.
    """
    if (re.match(
            (r"(([a-zA-Z][0-9a-zA-Z+\-\.]*:)?/{0,2}[0-9a-zA-Z;" +
             r"/?:@&=+$\.\-_!~*'()%]+)?(#[0-9a-zA-Z;/?:@&=+$\.\-_!~*'()%]+)?"),
            uri) is None):
        return False
    try:
        urllib.parse.urlparse(uri)
    except Exception:
        return False
    return True
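A few quick examples of the checkers (the expected results follow directly from the regular expressions above)::

    assert check_id("Table_1")
    assert not check_id("1Table")            # IDs must not start with a digit
    assert fix_id("1 Table") == "_1_Table"   # invalid characters become underscores
    assert check_mime_content_type("text/xml")
    assert not check_mime_content_type("text")
    assert check_anyuri("http://example.com/#frag")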
c6a37290e0b33fe71b7d7d023e61a7a4fd246424fb832b21570f14a15dea2d8d
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Contains a class that makes it simple to stream out well-formed and nicely-indented XML. """ # STDLIB import contextlib import textwrap try: import bleach HAS_BLEACH = True except ImportError: HAS_BLEACH = False try: from . import _iterparser except ImportError: def xml_escape_cdata(s): """ Escapes &, < and > in an XML CDATA string. """ s = s.replace("&", "&amp;") s = s.replace("<", "&lt;") s = s.replace(">", "&gt;") return s def xml_escape(s): """ Escapes &, ', ", < and > in an XML attribute value. """ s = s.replace("&", "&amp;") s = s.replace("'", "&apos;") s = s.replace("\"", "&quot;") s = s.replace("<", "&lt;") s = s.replace(">", "&gt;") return s else: xml_escape_cdata = _iterparser.escape_xml_cdata xml_escape = _iterparser.escape_xml class XMLWriter: """ A class to write well-formed and nicely indented XML. Use like this:: w = XMLWriter(fh) with w.tag('html'): with w.tag('body'): w.data('This is the content') Which produces:: <html> <body> This is the content </body> </html> """ def __init__(self, file): """ Parameters ---------- file : writable file-like object. """ self.write = file.write if hasattr(file, "flush"): self.flush = file.flush self._open = 0 # true if start tag is open self._tags = [] self._data = [] self._indentation = " " * 64 self.xml_escape_cdata = xml_escape_cdata self.xml_escape = xml_escape def _flush(self, indent=True, wrap=False): """ Flush internal buffers. """ if self._open: if indent: self.write(">\n") else: self.write(">") self._open = 0 if self._data: data = ''.join(self._data) if wrap: indent = self.get_indentation_spaces(1) data = textwrap.fill( data, initial_indent=indent, subsequent_indent=indent) self.write('\n') self.write(self.xml_escape_cdata(data)) self.write('\n') self.write(self.get_indentation_spaces()) else: self.write(self.xml_escape_cdata(data)) self._data = [] def start(self, tag, attrib={}, **extra): """ Opens a new element. Attributes can be given as keyword arguments, or as a string/string dictionary. The method returns an opaque identifier that can be passed to the :meth:`close` method, to close all open elements up to and including this one. Parameters ---------- tag : str The element name attrib : dict of str -> str Attribute dictionary. Alternatively, attributes can be given as keyword arguments. Returns ------- id : int Returns an element identifier. """ self._flush() # This is just busy work -- we know our tag names are clean # tag = xml_escape_cdata(tag) self._data = [] self._tags.append(tag) self.write(self.get_indentation_spaces(-1)) self.write("<{}".format(tag)) if attrib or extra: attrib = attrib.copy() attrib.update(extra) attrib = list(attrib.items()) attrib.sort() for k, v in attrib: if v is not None: # This is just busy work -- we know our keys are clean # k = xml_escape_cdata(k) v = self.xml_escape(v) self.write(" {}=\"{}\"".format(k, v)) self._open = 1 return len(self._tags) @contextlib.contextmanager def xml_cleaning_method(self, method='escape_xml', **clean_kwargs): """Context manager to control how XML data tags are cleaned (escaped) to remove potentially unsafe characters or constructs. The default (``method='escape_xml'``) applies brute-force escaping of certain key XML characters like ``<``, ``>``, and ``&`` to ensure that the output is not valid XML. In order to explicitly allow certain XML tags (e.g. link reference or emphasis tags), use ``method='bleach_clean'``. 
This sanitizes the data string using the ``clean`` function of the `http://bleach.readthedocs.io/en/latest/clean.html <bleach>`_ package. Any additional keyword arguments will be passed directly to the ``clean`` function. Finally, use ``method='none'`` to disable any sanitization. This should be used sparingly. Example:: w = writer.XMLWriter(ListWriter(lines)) with w.xml_cleaning_method('bleach_clean'): w.start('td') w.data('<a href="http://google.com">google.com</a>') w.end() Parameters ---------- method : str Cleaning method. Allowed values are "escape_xml", "bleach_clean", and "none". **clean_kwargs : keyword args Additional keyword args that are passed to the bleach.clean() function. """ current_xml_escape_cdata = self.xml_escape_cdata if method == 'bleach_clean': if HAS_BLEACH: if clean_kwargs is None: clean_kwargs = {} self.xml_escape_cdata = lambda x: bleach.clean(x, **clean_kwargs) else: raise ValueError('bleach package is required when HTML escaping is disabled.\n' 'Use "pip install bleach".') elif method == "none": self.xml_escape_cdata = lambda x: x elif method != 'escape_xml': raise ValueError('allowed values of method are "escape_xml", "bleach_clean", and "none"') yield self.xml_escape_cdata = current_xml_escape_cdata @contextlib.contextmanager def tag(self, tag, attrib={}, **extra): """ A convenience method for creating wrapper elements using the ``with`` statement. Examples -------- >>> with writer.tag('foo'): # doctest: +SKIP ... writer.element('bar') ... # </foo> is implicitly closed here ... Parameters are the same as to `start`. """ self.start(tag, attrib, **extra) yield self.end(tag) def comment(self, comment): """ Adds a comment to the output stream. Parameters ---------- comment : str Comment text, as a Unicode string. """ self._flush() self.write(self.get_indentation_spaces()) self.write("<!-- {} -->\n".format(self.xml_escape_cdata(comment))) def data(self, text): """ Adds character data to the output stream. Parameters ---------- text : str Character data, as a Unicode string. """ self._data.append(text) def end(self, tag=None, indent=True, wrap=False): """ Closes the current element (opened by the most recent call to `start`). Parameters ---------- tag : str Element name. If given, the tag must match the start tag. If omitted, the current element is closed. """ if tag: if not self._tags: raise ValueError("unbalanced end({})".format(tag)) if tag != self._tags[-1]: raise ValueError("expected end({}), got {}".format( self._tags[-1], tag)) else: if not self._tags: raise ValueError("unbalanced end()") tag = self._tags.pop() if self._data: self._flush(indent, wrap) elif self._open: self._open = 0 self.write("/>\n") return if indent: self.write(self.get_indentation_spaces()) self.write("</{}>\n".format(tag)) def close(self, id): """ Closes open elements, up to (and including) the element identified by the given identifier. Parameters ---------- id : int Element identifier, as returned by the `start` method. """ while len(self._tags) > id: self.end() def element(self, tag, text=None, wrap=False, attrib={}, **extra): """ Adds an entire element. This is the same as calling `start`, `data`, and `end` in sequence. The ``text`` argument can be omitted. """ self.start(tag, attrib, **extra) if text: self.data(text) self.end(indent=False, wrap=wrap) def flush(self): pass # replaced by the constructor def get_indentation(self): """ Returns the number of indentation levels the file is currently in. 
""" return len(self._tags) def get_indentation_spaces(self, offset=0): """ Returns a string of spaces that matches the current indentation level. """ return self._indentation[:len(self._tags) + offset] @staticmethod def object_attrs(obj, attrs): """ Converts an object with a bunch of attributes on an object into a dictionary for use by the `XMLWriter`. Parameters ---------- obj : object Any Python object attrs : sequence of str Attribute names to pull from the object Returns ------- attrs : dict Maps attribute names to the values retrieved from ``obj.attr``. If any of the attributes is `None`, it will not appear in the output dictionary. """ d = {} for attr in attrs: if getattr(obj, attr) is not None: d[attr.replace('_', '-')] = str(getattr(obj, attr)) return d
1b70e35f9d7e769a14f33ed3c57d3e81da1dc50648183a66721b85fd163ca2f8
# Licensed under a 3-clause BSD style license - see LICENSE.rst from distutils.core import Extension from os.path import join import sys from astropy_helpers import setup_helpers def get_external_libraries(): return ['expat'] def get_extensions(build_type='release'): XML_DIR = 'astropy/utils/xml/src' cfg = setup_helpers.DistutilsExtensionArgs({ 'sources': [join(XML_DIR, "iterparse.c")] }) if setup_helpers.use_system_library('expat'): cfg.update(setup_helpers.pkg_config(['expat'], ['expat'])) else: EXPAT_DIR = 'cextern/expat/lib' cfg['sources'].extend([ join(EXPAT_DIR, fn) for fn in ["xmlparse.c", "xmlrole.c", "xmltok.c", "xmltok_impl.c"]]) cfg['include_dirs'].extend([XML_DIR, EXPAT_DIR]) if sys.platform.startswith('linux'): # This is to ensure we only export the Python entry point # symbols and the linker won't try to use the system expat in # place of ours. cfg['extra_link_args'].extend([ '-Wl,--version-script={0}'.format( join(XML_DIR, 'iterparse.map')) ]) cfg['define_macros'].append(("HAVE_EXPAT_CONFIG_H", 1)) if sys.byteorder == 'big': cfg['define_macros'].append(('BYTEORDER', '4321')) else: cfg['define_macros'].append(('BYTEORDER', '1234')) if sys.platform != 'win32': cfg['define_macros'].append(('HAVE_UNISTD_H', None)) return [Extension("astropy.utils.xml._iterparser", **cfg)]
d7f37b1ba8312265baaa1e217fc0287c7da270f87ce3790f0561045c0ecc1c56
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module includes a fast iterator-based XML parser. """ # STDLIB import contextlib import io import sys # ASTROPY from .. import data __all__ = ['get_xml_iterator', 'get_xml_encoding', 'xml_readlines'] @contextlib.contextmanager def _convert_to_fd_or_read_function(fd): """ Returns a function suitable for streaming input, or a file object. This function is only useful if passing off to C code where: - If it's a real file object, we want to use it as a real C file object to avoid the Python overhead. - If it's not a real file object, it's much handier to just have a Python function to call. This is somewhat quirky behavior, of course, which is why it is private. For a more useful version of similar behavior, see `astropy.utils.misc.get_readable_fileobj`. Parameters ---------- fd : object May be: - a file object. If the file is uncompressed, this raw file object is returned verbatim. Otherwise, the read method is returned. - a function that reads from a stream, in which case it is returned verbatim. - a file path, in which case it is opened. Again, like a file object, if it's uncompressed, a raw file object is returned, otherwise its read method. - an object with a :meth:`read` method, in which case that method is returned. Returns ------- fd : context-dependent See above. """ if callable(fd): yield fd return with data.get_readable_fileobj(fd, encoding='binary') as new_fd: if sys.platform.startswith('win'): yield new_fd.read else: if isinstance(new_fd, io.FileIO): yield new_fd else: yield new_fd.read def _fast_iterparse(fd, buffersize=2 ** 10): from xml.parsers import expat if not callable(fd): read = fd.read else: read = fd queue = [] text = [] def start(name, attr): queue.append((True, name, attr, (parser.CurrentLineNumber, parser.CurrentColumnNumber))) del text[:] def end(name): queue.append((False, name, ''.join(text).strip(), (parser.CurrentLineNumber, parser.CurrentColumnNumber))) parser = expat.ParserCreate() parser.specified_attributes = True parser.StartElementHandler = start parser.EndElementHandler = end parser.CharacterDataHandler = text.append Parse = parser.Parse data = read(buffersize) while data: Parse(data, False) for elem in queue: yield elem del queue[:] data = read(buffersize) Parse('', True) for elem in queue: yield elem # Try to import the C version of the iterparser, otherwise fall back # to the Python implementation above. _slow_iterparse = _fast_iterparse try: from . import _iterparser _fast_iterparse = _iterparser.IterParser except ImportError: pass @contextlib.contextmanager def get_xml_iterator(source, _debug_python_based_parser=False): """ Returns an iterator over the elements of an XML file. The iterator doesn't ever build a tree, so it is much more memory and time efficient than the alternative in ``cElementTree``. Parameters ---------- fd : readable file-like object or read function Returns ------- parts : iterator The iterator returns 4-tuples (*start*, *tag*, *data*, *pos*): - *start*: when `True` is a start element event, otherwise an end element event. - *tag*: The name of the element - *data*: Depends on the value of *event*: - if *start* == `True`, data is a dictionary of attributes - if *start* == `False`, data is a string containing the text content of the element - *pos*: Tuple (*line*, *col*) indicating the source of the event. 
""" with _convert_to_fd_or_read_function(source) as fd: if _debug_python_based_parser: context = _slow_iterparse(fd) else: context = _fast_iterparse(fd) yield iter(context) def get_xml_encoding(source): """ Determine the encoding of an XML file by reading its header. Parameters ---------- source : readable file-like object, read function or str path Returns ------- encoding : str """ with get_xml_iterator(source) as iterator: start, tag, data, pos = next(iterator) if not start or tag != 'xml': raise OSError('Invalid XML file') # The XML spec says that no encoding === utf-8 return data.get('encoding') or 'utf-8' def xml_readlines(source): """ Get the lines from a given XML file. Correctly determines the encoding and always returns unicode. Parameters ---------- source : readable file-like object, read function or str path Returns ------- lines : list of unicode """ encoding = get_xml_encoding(source) with data.get_readable_fileobj(source, encoding=encoding) as input: input.seek(0) xml_lines = input.readlines() return xml_lines