# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains helper classes to fill the erfa.astrom struct and a
ScienceState that makes it possible to speed up coordinate transformations
at the expense of accuracy.
"""

import warnings

import erfa
import numpy as np

import astropy.units as u
from astropy.time import Time
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.state import ScienceState

from .builtin_frames.utils import (
    get_cip,
    get_jd12,
    get_polar_motion,
    pav2pv,
    prepare_earth_position_vel,
)
from .matrix_utilities import rotation_matrix

__all__ = []


class ErfaAstrom:
    """
    The default provider for astrometry values. A utility class
    to extract the necessary arguments for erfa functions from frame
    attributes, call the corresponding erfa functions and return
    the astrom object.
    """

    @staticmethod
    def apco(frame_or_coord):
        """
        Wrapper for ``erfa.apco``, used in conversions AltAz <-> ICRS and CIRS <-> ICRS

        Parameters
        ----------
        frame_or_coord : ``astropy.coordinates.BaseCoordinateFrame`` or ``astropy.coordinates.SkyCoord``
            Frame or coordinate instance in the corresponding frame
            for which to calculate the astrom values.
            For this function, an AltAz or CIRS frame is expected.
        """
        lon, lat, height = frame_or_coord.location.to_geodetic("WGS84")
        obstime = frame_or_coord.obstime

        jd1_tt, jd2_tt = get_jd12(obstime, "tt")
        xp, yp = get_polar_motion(obstime)
        sp = erfa.sp00(jd1_tt, jd2_tt)
        x, y, s = get_cip(jd1_tt, jd2_tt)
        era = erfa.era00(*get_jd12(obstime, "ut1"))
        earth_pv, earth_heliocentric = prepare_earth_position_vel(obstime)

        # refraction constants
        if hasattr(frame_or_coord, "pressure"):
            # this is an AltAz like frame. Calculate refraction
            refa, refb = erfa.refco(
                frame_or_coord.pressure.to_value(u.hPa),
                frame_or_coord.temperature.to_value(u.deg_C),
                frame_or_coord.relative_humidity.value,
                frame_or_coord.obswl.to_value(u.micron),
            )
        else:
            # This is not an AltAz frame, so don't bother computing refraction
            refa, refb = 0.0, 0.0

        return erfa.apco(
            jd1_tt,
            jd2_tt,
            earth_pv,
            earth_heliocentric,
            x,
            y,
            s,
            era,
            lon.to_value(u.radian),
            lat.to_value(u.radian),
            height.to_value(u.m),
            xp,
            yp,
            sp,
            refa,
            refb,
        )

    @staticmethod
    def apcs(frame_or_coord):
        """
        Wrapper for ``erfa.apcs``, used in conversions GCRS <-> ICRS

        Parameters
        ----------
        frame_or_coord : ``astropy.coordinates.BaseCoordinateFrame`` or ``astropy.coordinates.SkyCoord``
            Frame or coordinate instance in the corresponding frame
            for which to calculate the astrom values.
            For this function, a GCRS frame is expected.
        """
        jd1_tt, jd2_tt = get_jd12(frame_or_coord.obstime, "tt")
        obs_pv = pav2pv(
            frame_or_coord.obsgeoloc.get_xyz(xyz_axis=-1).value,
            frame_or_coord.obsgeovel.get_xyz(xyz_axis=-1).value,
        )
        earth_pv, earth_heliocentric = prepare_earth_position_vel(
            frame_or_coord.obstime
        )
        return erfa.apcs(jd1_tt, jd2_tt, obs_pv, earth_pv, earth_heliocentric)

    @staticmethod
    def apio(frame_or_coord):
        """
        Slightly modified equivalent of ``erfa.apio``, used in conversions
        AltAz <-> CIRS.

        Since we use a topocentric CIRS frame, we have dropped the steps
        needed to calculate diurnal aberration.

        Parameters
        ----------
        frame_or_coord : ``astropy.coordinates.BaseCoordinateFrame`` or ``astropy.coordinates.SkyCoord``
            Frame or coordinate instance in the corresponding frame
            for which to calculate the astrom values.
            For this function, an AltAz frame is expected.
        """
        # Calculate erfa.apio input parameters.
        # TIO locator s'
        sp = erfa.sp00(*get_jd12(frame_or_coord.obstime, "tt"))

        # Earth rotation angle.
        theta = erfa.era00(*get_jd12(frame_or_coord.obstime, "ut1"))

        # Longitude and latitude in radians.
        lon, lat, height = frame_or_coord.location.to_geodetic("WGS84")
        elong = lon.to_value(u.radian)
        phi = lat.to_value(u.radian)

        # Polar motion, rotated onto local meridian
        xp, yp = get_polar_motion(frame_or_coord.obstime)

        # we need an empty astrom structure before we fill in the required sections
        astrom = np.zeros(frame_or_coord.obstime.shape, dtype=erfa.dt_eraASTROM)

        # Form the rotation matrix, CIRS to apparent [HA,Dec].
        r = (
            rotation_matrix(elong, "z", unit=u.radian)
            @ rotation_matrix(-yp, "x", unit=u.radian)
            @ rotation_matrix(-xp, "y", unit=u.radian)
            @ rotation_matrix(theta + sp, "z", unit=u.radian)
        )

        # Solve for local Earth rotation angle.
        a = r[..., 0, 0]
        b = r[..., 0, 1]
        eral = np.arctan2(b, a)
        astrom["eral"] = eral

        # Solve for polar motion [X,Y] with respect to local meridian.
        c = r[..., 0, 2]
        astrom["xpl"] = np.arctan2(c, np.sqrt(a * a + b * b))
        a = r[..., 1, 2]
        b = r[..., 2, 2]
        astrom["ypl"] = -np.arctan2(a, b)

        # Adjusted longitude.
        astrom["along"] = erfa.anpm(eral - theta)

        # Functions of latitude.
        astrom["sphi"] = np.sin(phi)
        astrom["cphi"] = np.cos(phi)

        # Omit two steps that are zero for a geocentric observer:
        # Observer's geocentric position and velocity (m, m/s, CIRS).
        # Magnitude of diurnal aberration vector.

        # Refraction constants.
        astrom["refa"], astrom["refb"] = erfa.refco(
            frame_or_coord.pressure.to_value(u.hPa),
            frame_or_coord.temperature.to_value(u.deg_C),
            frame_or_coord.relative_humidity.value,
            frame_or_coord.obswl.to_value(u.micron),
        )
        return astrom


class ErfaAstromInterpolator(ErfaAstrom):
    """
    A provider for astrometry values that does not call erfa
    for each individual timestamp but interpolates linearly
    between support points.

    For the interpolation, float64 MJD values are used, so time precision
    for the interpolation will be around a microsecond.

    This can dramatically speed up coordinate transformations,
    e.g. between CIRS and ICRS,
    when obstime is an array of many values (factors of 10 to > 100 depending
    on the selected resolution, number of points and the time range of the values).

    The precision of the transformation will still be in the order of microseconds
    for reasonable values of time_resolution, e.g. ``300 * u.s``.

    Users should benchmark performance and accuracy with the default transformation
    for their specific use case and then choose a suitable ``time_resolution``
    from there.

    This class is intended to be used together with the ``erfa_astrom`` science
    state, e.g. in a context manager like this

    Example
    -------
    >>> from astropy.coordinates import SkyCoord, CIRS
    >>> from astropy.coordinates.erfa_astrom import erfa_astrom, ErfaAstromInterpolator
    >>> import astropy.units as u
    >>> from astropy.time import Time
    >>> import numpy as np

    >>> obstime = Time('2010-01-01T20:00:00') + np.linspace(0, 4, 1000) * u.hour
    >>> crab = SkyCoord(ra='05h34m31.94s', dec='22d00m52.2s')
    >>> with erfa_astrom.set(ErfaAstromInterpolator(300 * u.s)):
    ...     cirs = crab.transform_to(CIRS(obstime=obstime))
    """

    @u.quantity_input(time_resolution=u.day)
    def __init__(self, time_resolution):
        if time_resolution.to_value(u.us) < 10:
            warnings.warn(
                f"Using {self.__class__.__name__} with `time_resolution`"
                " below 10 microseconds might lead to numerical inaccuracies"
                " as the MJD-based interpolation is limited by floating point"
                " precision to about a microsecond",
                AstropyWarning,
            )

        self.mjd_resolution = time_resolution.to_value(u.day)

    def _get_support_points(self, obstime):
        """
        Calculate support points for the interpolation.

        We divide the MJD by the time resolution (as single float64 values),
        and calculate ceil and floor.
        Then we take the unique and sorted values and scale back to MJD.
        This will create a sparse support for non-regular input obstimes.
        """
        mjd_scaled = np.ravel(obstime.mjd / self.mjd_resolution)

        # unique already does sorting
        mjd_u = np.unique(np.concatenate([np.floor(mjd_scaled), np.ceil(mjd_scaled)]))

        return Time(
            mjd_u * self.mjd_resolution,
            format="mjd",
            scale=obstime.scale,
        )

    @staticmethod
    def _prepare_earth_position_vel(support, obstime):
        """
        Calculate Earth's position and velocity.

        Uses the coarser grid ``support`` to do the calculation, and
        interpolates onto the finer grid ``obstime``.
        """
        pv_support, heliocentric_support = prepare_earth_position_vel(support)

        # do interpolation
        earth_pv = np.empty(obstime.shape, dtype=erfa.dt_pv)
        earth_heliocentric = np.empty(obstime.shape + (3,))
        for dim in range(3):
            for key in "pv":
                earth_pv[key][..., dim] = np.interp(
                    obstime.mjd, support.mjd, pv_support[key][..., dim]
                )
            earth_heliocentric[..., dim] = np.interp(
                obstime.mjd, support.mjd, heliocentric_support[..., dim]
            )

        return earth_pv, earth_heliocentric

    @staticmethod
    def _get_c2i(support, obstime):
        """
        Calculate the Celestial-to-Intermediate rotation matrix.

        Uses the coarser grid ``support`` to do the calculation, and
        interpolates onto the finer grid ``obstime``.
        """
        jd1_tt_support, jd2_tt_support = get_jd12(support, "tt")
        c2i_support = erfa.c2i06a(jd1_tt_support, jd2_tt_support)
        c2i = np.empty(obstime.shape + (3, 3))
        for dim1 in range(3):
            for dim2 in range(3):
                c2i[..., dim1, dim2] = np.interp(
                    obstime.mjd, support.mjd, c2i_support[..., dim1, dim2]
                )
        return c2i

    @staticmethod
    def _get_cip(support, obstime):
        """
        Find the X, Y coordinates of the CIP and the CIO locator, s.

        Uses the coarser grid ``support`` to do the calculation, and
        interpolates onto the finer grid ``obstime``.
        """
        jd1_tt_support, jd2_tt_support = get_jd12(support, "tt")
        cip_support = get_cip(jd1_tt_support, jd2_tt_support)
        return tuple(
            np.interp(obstime.mjd, support.mjd, cip_component)
            for cip_component in cip_support
        )

    @staticmethod
    def _get_polar_motion(support, obstime):
        """
        Find the two polar motion components in radians.

        Uses the coarser grid ``support`` to do the calculation, and
        interpolates onto the finer grid ``obstime``.
        """
        polar_motion_support = get_polar_motion(support)
        return tuple(
            np.interp(obstime.mjd, support.mjd, polar_motion_component)
            for polar_motion_component in polar_motion_support
        )

    def apco(self, frame_or_coord):
        """
        Wrapper for ``erfa.apco``, used in conversions AltAz <-> ICRS and CIRS <-> ICRS

        Parameters
        ----------
        frame_or_coord : ``astropy.coordinates.BaseCoordinateFrame`` or ``astropy.coordinates.SkyCoord``
            Frame or coordinate instance in the corresponding frame
            for which to calculate the astrom values.
            For this function, an AltAz or CIRS frame is expected.
        """
        lon, lat, height = frame_or_coord.location.to_geodetic("WGS84")
        obstime = frame_or_coord.obstime
        support = self._get_support_points(obstime)
        jd1_tt, jd2_tt = get_jd12(obstime, "tt")

        # get the position and velocity arrays for the observatory.  Need to
        # have xyz in last dimension, and pos/vel in one-but-last.
        earth_pv, earth_heliocentric = self._prepare_earth_position_vel(
            support, obstime
        )

        xp, yp = self._get_polar_motion(support, obstime)
        sp = erfa.sp00(jd1_tt, jd2_tt)
        x, y, s = self._get_cip(support, obstime)
        era = erfa.era00(*get_jd12(obstime, "ut1"))

        # refraction constants
        if hasattr(frame_or_coord, "pressure"):
            # an AltAz like frame. Include refraction
            refa, refb = erfa.refco(
                frame_or_coord.pressure.to_value(u.hPa),
                frame_or_coord.temperature.to_value(u.deg_C),
                frame_or_coord.relative_humidity.value,
                frame_or_coord.obswl.to_value(u.micron),
            )
        else:
            # a CIRS like frame - no refraction
            refa, refb = 0.0, 0.0

        return erfa.apco(
            jd1_tt,
            jd2_tt,
            earth_pv,
            earth_heliocentric,
            x,
            y,
            s,
            era,
            lon.to_value(u.radian),
            lat.to_value(u.radian),
            height.to_value(u.m),
            xp,
            yp,
            sp,
            refa,
            refb,
        )

    def apcs(self, frame_or_coord):
        """
        Wrapper for ``erfa.apcs``, used in conversions GCRS <-> ICRS

        Parameters
        ----------
        frame_or_coord : ``astropy.coordinates.BaseCoordinateFrame`` or ``astropy.coordinates.SkyCoord``
            Frame or coordinate instance in the corresponding frame
            for which to calculate the astrom values.
            For this function, a GCRS frame is expected.
        """
        obstime = frame_or_coord.obstime
        support = self._get_support_points(obstime)

        # get the position and velocity arrays for the observatory.  Need to
        # have xyz in last dimension, and pos/vel in one-but-last.
        earth_pv, earth_heliocentric = self._prepare_earth_position_vel(
            support, obstime
        )
        pv = pav2pv(
            frame_or_coord.obsgeoloc.get_xyz(xyz_axis=-1).value,
            frame_or_coord.obsgeovel.get_xyz(xyz_axis=-1).value,
        )

        jd1_tt, jd2_tt = get_jd12(obstime, "tt")
        return erfa.apcs(jd1_tt, jd2_tt, pv, earth_pv, earth_heliocentric)


class erfa_astrom(ScienceState):
    """
    ScienceState to select which astrom provider is used in
    coordinate transformations.
    """

    _value = ErfaAstrom()

    @classmethod
    def validate(cls, value):
        if not isinstance(value, ErfaAstrom):
            raise TypeError(f"Must be an instance of {ErfaAstrom!r}")
        return value
# Licensed under a 3-clause BSD style license - see LICENSE.rst

# Standard library
import re
import textwrap
import warnings
from datetime import datetime
from urllib.request import Request, urlopen

# Third-party
from astropy import time as atime
from astropy.utils.console import _color_text, color_print

from .funcs import get_sun

__all__ = []


class HumanError(ValueError):
    pass


class CelestialError(ValueError):
    pass


def get_sign(dt):
    """Return the zodiac sign for the given date, using the traditional
    (uncorrected) date ranges."""
    if (int(dt.month) == 12 and int(dt.day) >= 22) or (
        int(dt.month) == 1 and int(dt.day) <= 19
    ):
        zodiac_sign = "capricorn"
    elif (int(dt.month) == 1 and int(dt.day) >= 20) or (
        int(dt.month) == 2 and int(dt.day) <= 17
    ):
        zodiac_sign = "aquarius"
    elif (int(dt.month) == 2 and int(dt.day) >= 18) or (
        int(dt.month) == 3 and int(dt.day) <= 19
    ):
        zodiac_sign = "pisces"
    elif (int(dt.month) == 3 and int(dt.day) >= 20) or (
        int(dt.month) == 4 and int(dt.day) <= 19
    ):
        zodiac_sign = "aries"
    elif (int(dt.month) == 4 and int(dt.day) >= 20) or (
        int(dt.month) == 5 and int(dt.day) <= 20
    ):
        zodiac_sign = "taurus"
    elif (int(dt.month) == 5 and int(dt.day) >= 21) or (
        int(dt.month) == 6 and int(dt.day) <= 20
    ):
        zodiac_sign = "gemini"
    elif (int(dt.month) == 6 and int(dt.day) >= 21) or (
        int(dt.month) == 7 and int(dt.day) <= 22
    ):
        zodiac_sign = "cancer"
    elif (int(dt.month) == 7 and int(dt.day) >= 23) or (
        int(dt.month) == 8 and int(dt.day) <= 22
    ):
        zodiac_sign = "leo"
    elif (int(dt.month) == 8 and int(dt.day) >= 23) or (
        int(dt.month) == 9 and int(dt.day) <= 22
    ):
        zodiac_sign = "virgo"
    elif (int(dt.month) == 9 and int(dt.day) >= 23) or (
        int(dt.month) == 10 and int(dt.day) <= 22
    ):
        zodiac_sign = "libra"
    elif (int(dt.month) == 10 and int(dt.day) >= 23) or (
        int(dt.month) == 11 and int(dt.day) <= 21
    ):
        zodiac_sign = "scorpio"
    elif (int(dt.month) == 11 and int(dt.day) >= 22) or (
        int(dt.month) == 12 and int(dt.day) <= 21
    ):
        zodiac_sign = "sagittarius"

    return zodiac_sign


_VALID_SIGNS = [
    "capricorn",
    "aquarius",
    "pisces",
    "aries",
    "taurus",
    "gemini",
    "cancer",
    "leo",
    "virgo",
    "libra",
    "scorpio",
    "sagittarius",
]
# Some of the constellation names map to different astrological "sign names".
# Astrologers really need to talk to the IAU...
_CONST_TO_SIGNS = {"capricornus": "capricorn", "scorpius": "scorpio"}

_ZODIAC = (
    (1900, "rat"),
    (1901, "ox"),
    (1902, "tiger"),
    (1903, "rabbit"),
    (1904, "dragon"),
    (1905, "snake"),
    (1906, "horse"),
    (1907, "goat"),
    (1908, "monkey"),
    (1909, "rooster"),
    (1910, "dog"),
    (1911, "pig"),
)


# https://stackoverflow.com/questions/12791871/chinese-zodiac-python-program
def _get_zodiac(yr):
    return _ZODIAC[(yr - _ZODIAC[0][0]) % 12][1]


def horoscope(birthday, corrected=True, chinese=False):
    """
    Enter your birthday as an `astropy.time.Time` object and
    receive a mystical horoscope about things to come.

    Parameters
    ----------
    birthday : `astropy.time.Time` or str
        Your birthday as a `datetime.datetime` or `astropy.time.Time` object
        or "YYYY-MM-DD" string.
    corrected : bool
        Whether to account for the precession of the Earth instead of using
        the ancient Greek dates for the signs. After all, you do want
        your *real* horoscope, not a cheap inaccurate approximation, right?
    chinese : bool
        Chinese annual zodiac wisdom instead of Western one.

    Returns
    -------
    Infinite wisdom, condensed into astrologically precise prose.

    Notes
    -----
    This function was implemented on April 1. Take note of that date.
    """
    from bs4 import BeautifulSoup

    today = datetime.now()
    err_msg = "Invalid response from celestial gods (failed to load horoscope)."
    headers = {"User-Agent": "foo/bar"}
    special_words = {
        "([sS]tar[s^ ]*)": "yellow",
        "([yY]ou[^ ]*)": "magenta",
        "([pP]lay[^ ]*)": "blue",
        "([hH]eart)": "red",
        "([fF]ate)": "lightgreen",
    }

    if isinstance(birthday, str):
        birthday = datetime.strptime(birthday, "%Y-%m-%d")

    if chinese:
        # TODO: Make this more accurate by using the actual date, not just year
        # Might need third-party tool like https://pypi.org/project/lunardate
        zodiac_sign = _get_zodiac(birthday.year)
        url = (
            "https://www.horoscope.com/us/horoscopes/yearly/"
            f"{today.year}-chinese-horoscope-{zodiac_sign}.aspx"
        )
        summ_title_sfx = f"in {today.year}"

        try:
            res = Request(url, headers=headers)
            with urlopen(res) as f:
                try:
                    doc = BeautifulSoup(f, "html.parser")
                    # TODO: Also include Love, Family & Friends, Work, Money, More?
                    item = doc.find(id="overview")
                    desc = item.getText()
                except Exception:
                    raise CelestialError(err_msg)
        except Exception:
            raise CelestialError(err_msg)
    else:
        birthday = atime.Time(birthday)

        if corrected:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")  # Ignore ErfaWarning
                zodiac_sign = get_sun(birthday).get_constellation().lower()
            zodiac_sign = _CONST_TO_SIGNS.get(zodiac_sign, zodiac_sign)
            if zodiac_sign not in _VALID_SIGNS:
                raise HumanError(
                    f"On your birthday the sun was in {zodiac_sign.title()}, which is"
                    " not a sign of the zodiac. You must not exist. Or maybe you can"
                    " settle for corrected=False."
                )
        else:
            zodiac_sign = get_sign(birthday.to_datetime())
        url = f"https://astrology.com/horoscope/daily/{zodiac_sign}.html"
        summ_title_sfx = f"on {today.strftime('%Y-%m-%d')}"

        res = Request(url, headers=headers)
        with urlopen(res) as f:
            try:
                doc = BeautifulSoup(f, "html.parser")
                item = doc.find("div", {"id": "content"})
                desc = item.getText()
            except Exception:
                raise CelestialError(err_msg)

    print("*" * 79)
    color_print(f"Horoscope for {zodiac_sign.capitalize()} {summ_title_sfx}:", "green")
    print("*" * 79)
    for block in textwrap.wrap(desc, 79):
        split_block = block.split()
        for i, word in enumerate(split_block):
            for re_word in special_words.keys():
                match = re.search(re_word, word)
                if match is None:
                    continue
                split_block[i] = _color_text(match.groups()[0], special_words[re_word])
        print(" ".join(split_block))


def inject_horoscope():
    import astropy

    astropy._yourfuture = horoscope


inject_horoscope()
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains standard functions for earth orientation, such as
precession and nutation.

This module is (currently) not intended to be part of the public API, but
is instead primarily for internal use in `coordinates`.
"""

import erfa
import numpy as np

from astropy.time import Time

from .builtin_frames.utils import get_jd12
from .matrix_utilities import matrix_transpose, rotation_matrix

jd1950 = Time("B1950").jd
jd2000 = Time("J2000").jd


def eccentricity(jd):
    """
    Eccentricity of the Earth's orbit at the requested Julian Date.

    Parameters
    ----------
    jd : scalar or array-like
        Julian date at which to compute the eccentricity

    Returns
    -------
    eccentricity : scalar or array
        The eccentricity (or array of eccentricities)

    References
    ----------
    * Explanatory Supplement to the Astronomical Almanac: P. Kenneth
      Seidelmann (ed), University Science Books (1992).
    """
    T = (jd - jd1950) / 36525.0

    p = (-0.000000126, -0.00004193, 0.01673011)

    return np.polyval(p, T)


def mean_lon_of_perigee(jd):
    """
    Computes the mean longitude of perigee of the Earth's orbit at the
    requested Julian Date.

    Parameters
    ----------
    jd : scalar or array-like
        Julian date at which to compute the mean longitude of perigee

    Returns
    -------
    mean_lon_of_perigee : scalar or array
        Mean longitude of perigee in degrees (or array of mean longitudes)

    References
    ----------
    * Explanatory Supplement to the Astronomical Almanac: P. Kenneth
      Seidelmann (ed), University Science Books (1992).
    """
    T = (jd - jd1950) / 36525.0

    p = (0.012, 1.65, 6190.67, 1015489.951)

    return np.polyval(p, T) / 3600.0


def obliquity(jd, algorithm=2006):
    """
    Computes the obliquity of the Earth at the requested Julian Date.

    Parameters
    ----------
    jd : scalar or array-like
        Julian date (TT) at which to compute the obliquity
    algorithm : int
        Year of algorithm based on IAU adoption. Can be 2006, 2000 or 1980.
        The IAU 2006 algorithm is based on Hilton et al. 2006.
        The IAU 1980 algorithm is based on the Explanatory Supplement to the
        Astronomical Almanac (1992).
        The IAU 2000 algorithm starts with the IAU 1980 algorithm and applies
        a precession-rate correction from the IAU 2000 precession model.

    Returns
    -------
    obliquity : scalar or array
        Mean obliquity in degrees (or array of obliquities)

    References
    ----------
    * Hilton, J. et al., 2006, Celest.Mech.Dyn.Astron. 94, 351
    * Capitaine, N., et al., 2003, Astron.Astrophys. 400, 1145-1154
    * Explanatory Supplement to the Astronomical Almanac: P. Kenneth
      Seidelmann (ed), University Science Books (1992).
    """
    if algorithm == 2006:
        return np.rad2deg(erfa.obl06(jd, 0))
    elif algorithm == 2000:
        return np.rad2deg(erfa.obl80(jd, 0) + erfa.pr00(jd, 0)[1])
    elif algorithm == 1980:
        return np.rad2deg(erfa.obl80(jd, 0))
    else:
        raise ValueError("invalid algorithm year for computing obliquity")


def precession_matrix_Capitaine(fromepoch, toepoch):
    """
    Computes the precession matrix from one Julian epoch to another, per
    IAU 2006.

    Parameters
    ----------
    fromepoch : `~astropy.time.Time`
        The epoch to precess from.
    toepoch : `~astropy.time.Time`
        The epoch to precess to.

    Returns
    -------
    pmatrix : 3x3 array
        Precession matrix to get from ``fromepoch`` to ``toepoch``

    References
    ----------
    Hilton, J. et al., 2006, Celest.Mech.Dyn.Astron. 94, 351
    """
    # Multiply the two precession matrices (without frame bias) through J2000.0
    fromepoch_to_J2000 = matrix_transpose(erfa.bp06(*get_jd12(fromepoch, "tt"))[1])
    J2000_to_toepoch = erfa.bp06(*get_jd12(toepoch, "tt"))[1]
    return J2000_to_toepoch @ fromepoch_to_J2000


def _precession_matrix_besselian(epoch1, epoch2):
    """
    Computes the precession matrix from one Besselian epoch to another using
    Newcomb's method.

    ``epoch1`` and ``epoch2`` are in Besselian year numbers.
    """
    # tropical years
    t1 = (epoch1 - 1850.0) / 1000.0
    t2 = (epoch2 - 1850.0) / 1000.0
    dt = t2 - t1

    zeta1 = 23035.545 + t1 * 139.720 + 0.060 * t1 * t1
    zeta2 = 30.240 - 0.27 * t1
    zeta3 = 17.995
    pzeta = (zeta3, zeta2, zeta1, 0)
    zeta = np.polyval(pzeta, dt) / 3600

    z1 = 23035.545 + t1 * 139.720 + 0.060 * t1 * t1
    z2 = 109.480 + 0.39 * t1
    z3 = 18.325
    pz = (z3, z2, z1, 0)
    z = np.polyval(pz, dt) / 3600

    theta1 = 20051.12 - 85.29 * t1 - 0.37 * t1 * t1
    theta2 = -42.65 - 0.37 * t1
    theta3 = -41.8
    ptheta = (theta3, theta2, theta1, 0)
    theta = np.polyval(ptheta, dt) / 3600

    return (
        rotation_matrix(-z, "z")
        @ rotation_matrix(theta, "y")
        @ rotation_matrix(-zeta, "z")
    )


def nutation_components2000B(jd):
    """
    Computes nutation components following the IAU 2000B specification

    Parameters
    ----------
    jd : scalar
        Julian date (TT) at which to compute the nutation components

    Returns
    -------
    eps : float
        epsilon in radians
    dpsi : float
        dpsi in radians
    deps : float
        depsilon in radians
    """
    dpsi, deps, epsa, _, _, _, _, _ = erfa.pn00b(jd, 0)
    return epsa, dpsi, deps


def nutation_matrix(epoch):
    """
    Nutation matrix generated from nutation components, IAU 2000B model.

    Matrix converts from mean coordinate to true coordinate as
    r_true = M * r_mean

    Parameters
    ----------
    epoch : `~astropy.time.Time`
        The epoch at which to compute the nutation matrix

    Returns
    -------
    nmatrix : 3x3 array
        Nutation matrix for the specified epoch

    References
    ----------
    * Explanatory Supplement to the Astronomical Almanac: P. Kenneth
      Seidelmann (ed), University Science Books (1992).
    """
    # TODO: implement higher precision 2006/2000A model if requested/needed
    return erfa.num00b(*get_jd12(epoch, "tt"))
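
# --- Example (not part of the original module) ---
# A minimal sketch exercising the earth-orientation helpers defined above.
# The epochs chosen here are illustrative; expected numeric values are
# approximate.
import numpy as np

from astropy.time import Time

t = Time("J2000")

# Mean obliquity at J2000.0 from the IAU 2006 model: about 23.4392794 degrees
# (84381.406 arcsec).
eps = obliquity(t.jd, algorithm=2006)
print(eps)

# Precession matrix from J2000.0 to J2023.0; rotating the J2000.0 mean pole
# unit vector through it gives the direction of the mean pole at the later
# epoch, slightly tilted away from the z-axis.
pmat = precession_matrix_Capitaine(Time("J2000"), Time("J2023"))
pole_j2000 = np.array([0.0, 0.0, 1.0])
print(pmat @ pole_j2000)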
"""Implements the wrapper for the Astropy test runner. This is for backward-compatibility for other downstream packages and can be removed once astropy-helpers has reached end-of-life. """ import os import shutil import stat import subprocess import sys import tempfile from contextlib import contextmanager from setuptools import Command from astropy.logger import log @contextmanager def _suppress_stdout(): """ A context manager to temporarily disable stdout. Used later when installing a temporary copy of astropy to avoid a very verbose output. """ with open(os.devnull, "w") as devnull: old_stdout = sys.stdout sys.stdout = devnull try: yield finally: sys.stdout = old_stdout class FixRemoteDataOption(type): """ This metaclass is used to catch cases where the user is running the tests with --remote-data. We've now changed the --remote-data option so that it takes arguments, but we still want --remote-data to work as before and to enable all remote tests. With this metaclass, we can modify sys.argv before setuptools try to parse the command-line options. """ def __init__(cls, name, bases, dct): try: idx = sys.argv.index("--remote-data") except ValueError: pass else: sys.argv[idx] = "--remote-data=any" try: idx = sys.argv.index("-R") except ValueError: pass else: sys.argv[idx] = "-R=any" return super().__init__(name, bases, dct) class AstropyTest(Command, metaclass=FixRemoteDataOption): description = "Run the tests for this package" user_options = [ ( "package=", "P", "The name of a specific package to test, e.g. 'io.fits' or 'utils'. " "Accepts comma separated string to specify multiple packages. " "If nothing is specified, all default tests are run.", ), ( "test-path=", "t", "Specify a test location by path. If a relative path to a .py file, " 'it is relative to the built package, so e.g., a leading "astropy/" ' "is necessary. If a relative path to a .rst file, it is relative to " "the directory *below* the --docs-path directory, so a leading " '"docs/" is usually necessary. May also be an absolute path.', ), ("verbose-results", "V", "Turn on verbose output from pytest."), ("plugins=", "p", "Plugins to enable when running pytest."), ("pastebin=", "b", "Enable pytest pastebin output. Either 'all' or 'failed'."), ("args=", "a", "Additional arguments to be passed to pytest."), ( "remote-data=", "R", "Run tests that download remote data. Should be " "one of none/astropy/any (defaults to none).", ), ( "pep8", "8", "Enable PEP8 checking and disable regular tests. " "Requires the pytest-pep8 plugin.", ), ("pdb", "d", "Start the interactive Python debugger on errors."), ("coverage", "c", "Create a coverage report. Requires the coverage package."), ( "open-files", "o", "Fail if any tests leave files open. Requires the psutil package.", ), ( "parallel=", "j", "Run the tests in parallel on the specified number of " 'CPUs. If "auto", all the cores on the machine will be ' "used. Requires the pytest-xdist plugin.", ), ( "docs-path=", None, "The path to the documentation .rst files. If not provided, and " 'the current directory contains a directory called "docs", that ' "will be used.", ), ("skip-docs", None, "Don't test the documentation .rst files."), ( "repeat=", None, "How many times to repeat each test (can be used to check for " "sporadic failures).", ), ( "temp-root=", None, "The root directory in which to create the temporary testing files. " "If unspecified the system default is used (e.g. 
/tmp) as explained " "in the documentation for tempfile.mkstemp.", ), ( "verbose-install", None, "Turn on terminal output from the installation of astropy in a " "temporary folder.", ), ("readonly", None, "Make the temporary installation being tested read-only."), ] package_name = "" def initialize_options(self): self.package = None self.test_path = None self.verbose_results = False self.plugins = None self.pastebin = None self.args = None self.remote_data = "none" self.pep8 = False self.pdb = False self.coverage = False self.open_files = False self.parallel = 0 self.docs_path = None self.skip_docs = False self.repeat = None self.temp_root = None self.verbose_install = False self.readonly = False def finalize_options(self): # Normally we would validate the options here, but that's handled in # run_tests pass def generate_testing_command(self): """ Build a Python script to run the tests. """ cmd_pre = "" # Commands to run before the test function cmd_post = "" # Commands to run after the test function if self.coverage: pre, post = self._generate_coverage_commands() cmd_pre += pre cmd_post += post set_flag = "import builtins; builtins._ASTROPY_TEST_ = True" cmd = ( # see _build_temp_install below "{cmd_pre}{0}; import {1.package_name}, sys; result = (" "{1.package_name}.test(" "package={1.package!r}, " "test_path={1.test_path!r}, " "args={1.args!r}, " "plugins={1.plugins!r}, " "verbose={1.verbose_results!r}, " "pastebin={1.pastebin!r}, " "remote_data={1.remote_data!r}, " "pep8={1.pep8!r}, " "pdb={1.pdb!r}, " "open_files={1.open_files!r}, " "parallel={1.parallel!r}, " "docs_path={1.docs_path!r}, " "skip_docs={1.skip_docs!r}, " "add_local_eggs_to_path=True, " "repeat={1.repeat!r})); " "{cmd_post}" "sys.exit(result)" ) return cmd.format(set_flag, self, cmd_pre=cmd_pre, cmd_post=cmd_post) def run(self): """ Run the tests! """ # Install the runtime dependencies. if self.distribution.install_requires: self.distribution.fetch_build_eggs(self.distribution.install_requires) # Ensure there is a doc path if self.docs_path is None: cfg_docs_dir = self.distribution.get_option_dict("build_docs").get( "source_dir", None ) # Some affiliated packages use this. # See astropy/package-template#157 if cfg_docs_dir is not None and os.path.exists(cfg_docs_dir[1]): self.docs_path = os.path.abspath(cfg_docs_dir[1]) # fall back on a default path of "docs" elif os.path.exists("docs"): # pragma: no cover self.docs_path = os.path.abspath("docs") # Build a testing install of the package self._build_temp_install() # Install the test dependencies # NOTE: we do this here after _build_temp_install because there is # a weird but which occurs if psutil is installed in this way before # astropy is built, Cython can have segmentation fault. Strange, eh? if self.distribution.tests_require: self.distribution.fetch_build_eggs(self.distribution.tests_require) # Copy any additional dependencies that may have been installed via # tests_requires or install_requires. We then pass the # add_local_eggs_to_path=True option to package.test() to make sure the # eggs get included in the path. if os.path.exists(".eggs"): shutil.copytree(".eggs", os.path.join(self.testing_path, ".eggs")) # This option exists so that we can make sure that the tests don't # write to an installed location. if self.readonly: log.info("changing permissions of temporary installation to read-only") self._change_permissions_testing_path(writable=False) # Run everything in a try: finally: so that the tmp dir gets deleted. 
try: # Construct this modules testing command cmd = self.generate_testing_command() # Run the tests in a subprocess--this is necessary since # new extension modules may have appeared, and this is the # easiest way to set up a new environment testproc = subprocess.Popen( [sys.executable, "-c", cmd], cwd=self.testing_path, close_fds=False ) retcode = testproc.wait() except KeyboardInterrupt: import signal # If a keyboard interrupt is handled, pass it to the test # subprocess to prompt pytest to initiate its teardown testproc.send_signal(signal.SIGINT) retcode = testproc.wait() finally: # Remove temporary directory if self.readonly: self._change_permissions_testing_path(writable=True) shutil.rmtree(self.tmp_dir) raise SystemExit(retcode) def _build_temp_install(self): """ Install the package and to a temporary directory for the purposes of testing. This allows us to test the install command, include the entry points, and also avoids creating pyc and __pycache__ directories inside the build directory """ # On OSX the default path for temp files is under /var, but in most # cases on OSX /var is actually a symlink to /private/var; ensure we # dereference that link, because pytest is very sensitive to relative # paths... tmp_dir = tempfile.mkdtemp( prefix=self.package_name + "-test-", dir=self.temp_root ) self.tmp_dir = os.path.realpath(tmp_dir) log.info(f"installing to temporary directory: {self.tmp_dir}") # We now install the package to the temporary directory. We do this # rather than build and copy because this will ensure that e.g. entry # points work. self.reinitialize_command("install") install_cmd = self.distribution.get_command_obj("install") install_cmd.prefix = self.tmp_dir if self.verbose_install: self.run_command("install") else: with _suppress_stdout(): self.run_command("install") # We now get the path to the site-packages directory that was created # inside self.tmp_dir install_cmd = self.get_finalized_command("install") self.testing_path = install_cmd.install_lib # Ideally, docs_path is set properly in run(), but if it is still # not set here, do not pretend it is, otherwise bad things happen. # See astropy/package-template#157 if self.docs_path is not None: new_docs_path = os.path.join( self.testing_path, os.path.basename(self.docs_path) ) shutil.copytree(self.docs_path, new_docs_path) self.docs_path = new_docs_path shutil.copy("setup.cfg", self.testing_path) def _change_permissions_testing_path(self, writable=False): if writable: basic_flags = stat.S_IRUSR | stat.S_IWUSR else: basic_flags = stat.S_IRUSR for root, dirs, files in os.walk(self.testing_path): for dirname in dirs: os.chmod(os.path.join(root, dirname), basic_flags | stat.S_IXUSR) for filename in files: os.chmod(os.path.join(root, filename), basic_flags) def _generate_coverage_commands(self): """ This method creates the post and pre commands if coverage is to be generated """ if self.parallel != 0: raise ValueError("--coverage can not be used with --parallel") try: import coverage # noqa: F401 except ImportError: raise ImportError( "--coverage requires that the coverage package is installed." ) # Don't use get_pkg_data_filename here, because it # requires importing astropy.config and thus screwing # up coverage results for those packages. 
coveragerc = os.path.join( self.testing_path, self.package_name.replace(".", "/"), "tests", "coveragerc", ) with open(coveragerc) as fd: coveragerc_content = fd.read() coveragerc_content = coveragerc_content.replace( "{packagename}", self.package_name.replace(".", "/") ) tmp_coveragerc = os.path.join(self.tmp_dir, "coveragerc") with open(tmp_coveragerc, "wb") as tmp: tmp.write(coveragerc_content.encode("utf-8")) cmd_pre = ( "import coverage; cov =" f' coverage.coverage(data_file=r"{os.path.abspath(".coverage")}",' f' config_file=r"{os.path.abspath(tmp_coveragerc)}"); cov.start();' ) cmd_post = ( "cov.stop(); from astropy.tests.helper import _save_coverage;" f' _save_coverage(cov, result, r"{os.path.abspath(".")}",' f' r"{os.path.abspath(self.testing_path)}");' ) return cmd_pre, cmd_post
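
# --- Example (not part of the original module) ---
# A minimal sketch of how the deprecated AstropyTest command above was
# typically wired into a downstream package's setup.py. The package name is
# hypothetical, and the import path assumes this module is importable as
# astropy.tests.command.
from setuptools import setup

from astropy.tests.command import AstropyTest


class MyPackageTest(AstropyTest):
    package_name = "mypackage"  # hypothetical package to test


setup(
    name="mypackage",
    cmdclass={"test": MyPackageTest},  # exposes `python setup.py test`
)

# Typical invocations:
#   python setup.py test
#   python setup.py test -P io.fits     # test a single subpackage
#   python setup.py test --coverage     # generate a coverage report
#   python setup.py test --remote-data  # rewritten to --remote-data=any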
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import importlib
import locale
import logging
import sys
import warnings

import pytest

from astropy import log
from astropy.logger import LoggingError, conf
from astropy.utils.exceptions import AstropyUserWarning, AstropyWarning

# Save original values of hooks. These are not the system values, but the
# already overwritten values since the logger already gets imported before
# this file gets executed.
_excepthook = sys.__excepthook__
_showwarning = warnings.showwarning

try:
    ip = get_ipython()
except NameError:
    ip = None


def setup_function(function):
    # Reset modules to default
    importlib.reload(warnings)
    importlib.reload(sys)

    # Reset internal original hooks
    log._showwarning_orig = None
    log._excepthook_orig = None

    # Set up the logger
    log._set_defaults()

    # Reset hooks
    if log.warnings_logging_enabled():
        log.disable_warnings_logging()
    if log.exception_logging_enabled():
        log.disable_exception_logging()


teardown_module = setup_function


def test_warnings_logging_disable_no_enable():
    with pytest.raises(LoggingError, match=r"Warnings logging has not been enabled"):
        log.disable_warnings_logging()


def test_warnings_logging_enable_twice():
    log.enable_warnings_logging()
    with pytest.raises(
        LoggingError, match=r"Warnings logging has already been enabled"
    ):
        log.enable_warnings_logging()


def test_warnings_logging_overridden():
    log.enable_warnings_logging()
    warnings.showwarning = lambda: None
    with pytest.raises(
        LoggingError,
        match=r"Cannot disable warnings logging: "
        r"warnings\.showwarning was not set by this logger, or has been overridden",
    ):
        log.disable_warnings_logging()


def test_warnings_logging():
    # Without warnings logging
    with pytest.warns(AstropyUserWarning, match="This is a warning") as warn_list:
        with log.log_to_list() as log_list:
            warnings.warn("This is a warning", AstropyUserWarning)
    assert len(log_list) == 0
    assert len(warn_list) == 1

    # With warnings logging
    with warnings.catch_warnings(record=True) as warn_list:
        log.enable_warnings_logging()
        with log.log_to_list() as log_list:
            warnings.warn("This is a warning", AstropyUserWarning)
        log.disable_warnings_logging()
    assert len(log_list) == 1
    assert len(warn_list) == 0
    assert log_list[0].levelname == "WARNING"
    assert log_list[0].message.startswith("This is a warning")
    assert log_list[0].origin == "astropy.tests.test_logger"

    # With warnings logging (differentiate between Astropy and non-Astropy)
    with pytest.warns(
        UserWarning, match="This is another warning, not from Astropy"
    ) as warn_list:
        log.enable_warnings_logging()
        with log.log_to_list() as log_list:
            warnings.warn("This is a warning", AstropyUserWarning)
            warnings.warn("This is another warning, not from Astropy")
        log.disable_warnings_logging()
    assert len(log_list) == 1
    assert len(warn_list) == 1
    assert log_list[0].levelname == "WARNING"
    assert log_list[0].message.startswith("This is a warning")
    assert log_list[0].origin == "astropy.tests.test_logger"

    # Without warnings logging
    with pytest.warns(AstropyUserWarning, match="This is a warning") as warn_list:
        with log.log_to_list() as log_list:
            warnings.warn("This is a warning", AstropyUserWarning)
    assert len(log_list) == 0
    assert len(warn_list) == 1


def test_warnings_logging_with_custom_class():
    class CustomAstropyWarningClass(AstropyWarning):
        pass

    # With warnings logging
    with warnings.catch_warnings(record=True) as warn_list:
        log.enable_warnings_logging()
        with log.log_to_list() as log_list:
            warnings.warn("This is a warning", CustomAstropyWarningClass)
        log.disable_warnings_logging()
    assert len(log_list) == 1
    assert len(warn_list) == 0
    assert log_list[0].levelname == "WARNING"
    assert log_list[0].message.startswith(
        "CustomAstropyWarningClass: This is a warning"
    )
    assert log_list[0].origin == "astropy.tests.test_logger"


def test_warning_logging_with_io_votable_warning():
    from astropy.io.votable.exceptions import W02, vo_warn

    with warnings.catch_warnings(record=True) as warn_list:
        log.enable_warnings_logging()
        with log.log_to_list() as log_list:
            vo_warn(W02, ("a", "b"))
        log.disable_warnings_logging()
    assert len(log_list) == 1
    assert len(warn_list) == 0
    assert log_list[0].levelname == "WARNING"
    x = log_list[0].message.startswith(
        "W02: ?:?:?: W02: a attribute 'b' is invalid. Must be a standard XML id"
    )
    assert x
    assert log_list[0].origin == "astropy.tests.test_logger"


def test_import_error_in_warning_logging():
    """
    Regression test for https://github.com/astropy/astropy/issues/2671

    This test actually puts a goofy fake module into ``sys.modules`` to test
    this problem.
    """

    class FakeModule:
        def __getattr__(self, attr):
            raise ImportError("_showwarning should ignore any exceptions here")

    log.enable_warnings_logging()

    sys.modules["<test fake module>"] = FakeModule()
    try:
        warnings.showwarning(
            AstropyWarning("Regression test for #2671"),
            AstropyWarning,
            "<this is only a test>",
            1,
        )
    finally:
        del sys.modules["<test fake module>"]


def test_exception_logging_disable_no_enable():
    with pytest.raises(LoggingError, match=r"Exception logging has not been enabled"):
        log.disable_exception_logging()


def test_exception_logging_enable_twice():
    log.enable_exception_logging()
    with pytest.raises(
        LoggingError, match=r"Exception logging has already been enabled"
    ):
        log.enable_exception_logging()


@pytest.mark.skipif(
    ip is not None, reason="Cannot override exception handler in IPython"
)
def test_exception_logging_overridden():
    log.enable_exception_logging()
    sys.excepthook = lambda etype, evalue, tb: None
    with pytest.raises(
        LoggingError,
        match=(
            "Cannot disable exception logging: "
            "sys.excepthook was not set by this logger, or has been overridden"
        ),
    ):
        log.disable_exception_logging()


@pytest.mark.xfail("ip is not None")
def test_exception_logging():
    # Without exception logging
    try:
        with log.log_to_list() as log_list:
            raise Exception("This is an Exception")
    except Exception as exc:
        sys.excepthook(*sys.exc_info())
        assert exc.args[0] == "This is an Exception"
    else:
        assert False  # exception should have been raised
    assert len(log_list) == 0

    # With exception logging
    try:
        log.enable_exception_logging()
        with log.log_to_list() as log_list:
            raise Exception("This is an Exception")
    except Exception as exc:
        sys.excepthook(*sys.exc_info())
        assert exc.args[0] == "This is an Exception"
    else:
        assert False  # exception should have been raised
    assert len(log_list) == 1
    assert log_list[0].levelname == "ERROR"
    assert log_list[0].message.startswith("Exception: This is an Exception")
    assert log_list[0].origin == "astropy.tests.test_logger"

    # Without exception logging
    log.disable_exception_logging()
    try:
        with log.log_to_list() as log_list:
            raise Exception("This is an Exception")
    except Exception as exc:
        sys.excepthook(*sys.exc_info())
        assert exc.args[0] == "This is an Exception"
    else:
        assert False  # exception should have been raised
    assert len(log_list) == 0


@pytest.mark.xfail("ip is not None")
def test_exception_logging_origin():
    # The point here is to get an exception raised from another location
    # and make sure the error's origin is reported correctly
    from astropy.utils.collections import HomogeneousList

    lst = HomogeneousList(int)
    try:
        log.enable_exception_logging()
        with log.log_to_list() as log_list:
            lst.append("foo")
    except TypeError as exc:
        sys.excepthook(*sys.exc_info())
        assert exc.args[0].startswith(
            "homogeneous list must contain only objects of type "
        )
    else:
        assert False
    assert len(log_list) == 1
    assert log_list[0].levelname == "ERROR"
    assert log_list[0].message.startswith(
        "TypeError: homogeneous list must contain only objects of type "
    )
    assert log_list[0].origin == "astropy.utils.collections"


@pytest.mark.skip(reason="Infinite recursion on Python 3.5+, probably a real issue")
# @pytest.mark.xfail("ip is not None")
def test_exception_logging_argless_exception():
    """
    Regression test for a crash that occurred on Python 3 when logging an
    exception that was instantiated with no arguments (no message, etc.)

    Regression test for https://github.com/astropy/astropy/pull/4056
    """
    try:
        log.enable_exception_logging()
        with log.log_to_list() as log_list:
            raise Exception()
    except Exception:
        sys.excepthook(*sys.exc_info())
    else:
        assert False  # exception should have been raised
    assert len(log_list) == 1
    assert log_list[0].levelname == "ERROR"
    assert log_list[0].message == "Exception [astropy.tests.test_logger]"
    assert log_list[0].origin == "astropy.tests.test_logger"


@pytest.mark.parametrize("level", [None, "DEBUG", "INFO", "WARN", "ERROR"])
def test_log_to_list(level):
    orig_level = log.level

    try:
        if level is not None:
            log.setLevel(level)

        with log.log_to_list() as log_list:
            log.error("Error message")
            log.warning("Warning message")
            log.info("Information message")
            log.debug("Debug message")
    finally:
        log.setLevel(orig_level)

    if level is None:
        # The log level *should* be set to whatever it was in the config
        level = conf.log_level

    # Check list length
    if level == "DEBUG":
        assert len(log_list) == 4
    elif level == "INFO":
        assert len(log_list) == 3
    elif level == "WARN":
        assert len(log_list) == 2
    elif level == "ERROR":
        assert len(log_list) == 1

    # Check list content
    assert log_list[0].levelname == "ERROR"
    assert log_list[0].message.startswith("Error message")
    assert log_list[0].origin == "astropy.tests.test_logger"

    if len(log_list) >= 2:
        assert log_list[1].levelname == "WARNING"
        assert log_list[1].message.startswith("Warning message")
        assert log_list[1].origin == "astropy.tests.test_logger"

    if len(log_list) >= 3:
        assert log_list[2].levelname == "INFO"
        assert log_list[2].message.startswith("Information message")
        assert log_list[2].origin == "astropy.tests.test_logger"

    if len(log_list) >= 4:
        assert log_list[3].levelname == "DEBUG"
        assert log_list[3].message.startswith("Debug message")
        assert log_list[3].origin == "astropy.tests.test_logger"


def test_log_to_list_level():
    with log.log_to_list(filter_level="ERROR") as log_list:
        log.error("Error message")
        log.warning("Warning message")
    assert len(log_list) == 1 and log_list[0].levelname == "ERROR"


def test_log_to_list_origin1():
    with log.log_to_list(filter_origin="astropy.tests") as log_list:
        log.error("Error message")
        log.warning("Warning message")
    assert len(log_list) == 2


def test_log_to_list_origin2():
    with log.log_to_list(filter_origin="astropy.wcs") as log_list:
        log.error("Error message")
        log.warning("Warning message")
    assert len(log_list) == 0


@pytest.mark.parametrize("level", [None, "DEBUG", "INFO", "WARN", "ERROR"])
def test_log_to_file(tmp_path, level):
    local_path = tmp_path / "test.log"
    log_file = local_path.open("wb")
    log_path = str(local_path.resolve())
    orig_level = log.level

    try:
        if level is not None:
            log.setLevel(level)

        with log.log_to_file(log_path):
            log.error("Error message")
            log.warning("Warning message")
            log.info("Information message")
            log.debug("Debug message")

        log_file.close()
    finally:
        log.setLevel(orig_level)

    log_file = local_path.open("rb")
    log_entries = log_file.readlines()
    log_file.close()

    if level is None:
        # The log level *should* be set to whatever it was in the config
        level = conf.log_level

    # Check list length
    if level == "DEBUG":
        assert len(log_entries) == 4
    elif level == "INFO":
        assert len(log_entries) == 3
    elif level == "WARN":
        assert len(log_entries) == 2
    elif level == "ERROR":
        assert len(log_entries) == 1

    # Check list content
    assert eval(log_entries[0].strip())[-3:] == (
        "astropy.tests.test_logger",
        "ERROR",
        "Error message",
    )

    if len(log_entries) >= 2:
        assert eval(log_entries[1].strip())[-3:] == (
            "astropy.tests.test_logger",
            "WARNING",
            "Warning message",
        )

    if len(log_entries) >= 3:
        assert eval(log_entries[2].strip())[-3:] == (
            "astropy.tests.test_logger",
            "INFO",
            "Information message",
        )

    if len(log_entries) >= 4:
        assert eval(log_entries[3].strip())[-3:] == (
            "astropy.tests.test_logger",
            "DEBUG",
            "Debug message",
        )


def test_log_to_file_level(tmp_path):
    local_path = tmp_path / "test.log"
    log_file = local_path.open("wb")
    log_path = str(local_path.resolve())

    with log.log_to_file(log_path, filter_level="ERROR"):
        log.error("Error message")
        log.warning("Warning message")

    log_file.close()

    log_file = local_path.open("rb")
    log_entries = log_file.readlines()
    log_file.close()

    assert len(log_entries) == 1
    assert eval(log_entries[0].strip())[-2:] == ("ERROR", "Error message")


def test_log_to_file_origin1(tmp_path):
    local_path = tmp_path / "test.log"
    log_file = local_path.open("wb")
    log_path = str(local_path.resolve())

    with log.log_to_file(log_path, filter_origin="astropy.tests"):
        log.error("Error message")
        log.warning("Warning message")

    log_file.close()

    log_file = local_path.open("rb")
    log_entries = log_file.readlines()
    log_file.close()

    assert len(log_entries) == 2


def test_log_to_file_origin2(tmp_path):
    local_path = tmp_path / "test.log"
    log_file = local_path.open("wb")
    log_path = str(local_path.resolve())

    with log.log_to_file(log_path, filter_origin="astropy.wcs"):
        log.error("Error message")
        log.warning("Warning message")

    log_file.close()

    log_file = local_path.open("rb")
    log_entries = log_file.readlines()
    log_file.close()

    assert len(log_entries) == 0


@pytest.mark.parametrize("encoding", ["", "utf-8", "cp1252"])
def test_log_to_file_encoding(tmp_path, encoding):
    local_path = tmp_path / "test.log"
    log_path = str(local_path.resolve())

    orig_encoding = conf.log_file_encoding
    conf.log_file_encoding = encoding

    with log.log_to_file(log_path):
        for handler in log.handlers:
            if isinstance(handler, logging.FileHandler):
                if encoding:
                    assert handler.stream.encoding == encoding
                else:
                    assert handler.stream.encoding == locale.getpreferredencoding()

    conf.log_file_encoding = orig_encoding
"""Implements the Astropy TestRunner which is a thin wrapper around pytest.""" import copy import glob import inspect import os import shlex import sys import tempfile import warnings from collections import OrderedDict from functools import wraps from importlib.util import find_spec from astropy.config.paths import set_temp_cache, set_temp_config from astropy.utils import find_current_module from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning __all__ = ["TestRunner", "TestRunnerBase", "keyword"] class keyword: """ A decorator to mark a method as keyword argument for the ``TestRunner``. Parameters ---------- default_value : `object` The default value for the keyword argument. (Default: `None`) priority : `int` keyword argument methods are executed in order of descending priority. """ def __init__(self, default_value=None, priority=0): self.default_value = default_value self.priority = priority def __call__(self, f): def keyword(*args, **kwargs): return f(*args, **kwargs) keyword._default_value = self.default_value keyword._priority = self.priority # Set __doc__ explicitly here rather than using wraps because we want # to keep the function name as keyword so we can inspect it later. keyword.__doc__ = f.__doc__ return keyword class TestRunnerBase: """ The base class for the TestRunner. A test runner can be constructed by creating a subclass of this class and defining 'keyword' methods. These are methods that have the :class:`~astropy.tests.runner.keyword` decorator, these methods are used to construct allowed keyword arguments to the ``run_tests`` method as a way to allow customization of individual keyword arguments (and associated logic) without having to re-implement the whole ``run_tests`` method. Examples -------- A simple keyword method:: class MyRunner(TestRunnerBase): @keyword('default_value'): def spam(self, spam, kwargs): \"\"\" spam : `str` The parameter description for the run_tests docstring. \"\"\" # Return value must be a list with a CLI parameter for pytest. return ['--spam={}'.format(spam)] """ def __init__(self, base_path): self.base_path = os.path.abspath(base_path) def __new__(cls, *args, **kwargs): # Before constructing the class parse all the methods that have been # decorated with ``keyword``. # The objective of this method is to construct a default set of keyword # arguments to the ``run_tests`` method. It does this by inspecting the # methods of the class for functions with the name ``keyword`` which is # the name of the decorator wrapping function. Once it has created this # dictionary, it also formats the docstring of ``run_tests`` to be # comprised of the docstrings for the ``keyword`` methods. # To add a keyword argument to the ``run_tests`` method, define a new # method decorated with ``@keyword`` and with the ``self, name, kwargs`` # signature. # Get all 'function' members as the wrapped methods are functions functions = inspect.getmembers(cls, predicate=inspect.isfunction) # Filter out anything that's not got the name 'keyword' keywords = filter(lambda func: func[1].__name__ == "keyword", functions) # Sort all keywords based on the priority flag. sorted_keywords = sorted(keywords, key=lambda x: x[1]._priority, reverse=True) cls.keywords = OrderedDict() doc_keywords = "" for name, func in sorted_keywords: # Here we test if the function has been overloaded to return # NotImplemented which is the way to disable arguments on # subclasses. If it has been disabled we need to remove it from the # default keywords dict. 
We do it in the try except block because # we do not have access to an instance of the class, so this is # going to error unless the method is just doing `return # NotImplemented`. try: # Second argument is False, as it is normally a bool. # The other two are placeholders for objects. if func(None, False, None) is NotImplemented: continue except Exception: pass # Construct the default kwargs dict and docstring cls.keywords[name] = func._default_value if func.__doc__: doc_keywords += " " * 8 doc_keywords += func.__doc__.strip() doc_keywords += "\n\n" cls.run_tests.__doc__ = cls.RUN_TESTS_DOCSTRING.format(keywords=doc_keywords) return super().__new__(cls) def _generate_args(self, **kwargs): # Update default values with passed kwargs # but don't modify the defaults keywords = copy.deepcopy(self.keywords) keywords.update(kwargs) # Iterate through the keywords (in order of priority) args = [] for keyword in keywords.keys(): func = getattr(self, keyword) result = func(keywords[keyword], keywords) # Allow disabling of options in a subclass if result is NotImplemented: raise TypeError( f"run_tests() got an unexpected keyword argument {keyword}" ) # keyword methods must return a list if not isinstance(result, list): raise TypeError(f"{keyword} keyword method must return a list") args += result return args RUN_TESTS_DOCSTRING = """ Run the tests for the package. This method builds arguments for and then calls ``pytest.main``. Parameters ---------- {keywords} """ _required_dependencies = [ "pytest", "pytest_remotedata", "pytest_doctestplus", "pytest_astropy_header", ] _missing_dependancy_error = ( "Test dependencies are missing: {module}. You should install the " "'pytest-astropy' package (you may need to update the package if you " "have a previous version installed, e.g., " "'pip install pytest-astropy --upgrade' or the equivalent with conda)." ) @classmethod def _has_test_dependencies(cls): # pragma: no cover # Using the test runner will not work without these dependencies, but # pytest-openfiles is optional, so it's not listed here. for module in cls._required_dependencies: spec = find_spec(module) # Checking loader accounts for packages that were uninstalled if spec is None or spec.loader is None: raise RuntimeError(cls._missing_dependancy_error.format(module=module)) def run_tests(self, **kwargs): # The following option will include eggs inside a .eggs folder in # sys.path when running the tests. This is possible so that when # running pytest, test dependencies installed via e.g. # tests_requires are available here. This is not an advertised option # since it is only for internal use if kwargs.pop("add_local_eggs_to_path", False): # Add each egg to sys.path individually for egg in glob.glob(os.path.join(".eggs", "*.egg")): sys.path.insert(0, egg) self._has_test_dependencies() # pragma: no cover # The docstring for this method is defined as a class variable. # This allows it to be built for each subclass in __new__. 
# Don't import pytest until it's actually needed to run the tests import pytest # Raise error for undefined kwargs allowed_kwargs = set(self.keywords.keys()) passed_kwargs = set(kwargs.keys()) if not passed_kwargs.issubset(allowed_kwargs): wrong_kwargs = list(passed_kwargs.difference(allowed_kwargs)) raise TypeError( f"run_tests() got an unexpected keyword argument {wrong_kwargs[0]}" ) args = self._generate_args(**kwargs) if kwargs.get("plugins", None) is not None: plugins = kwargs.pop("plugins") elif self.keywords.get("plugins", None) is not None: plugins = self.keywords["plugins"] else: plugins = [] # Override the config locations to not make a new directory nor use # existing cache or config. Note that we need to do this here in # addition to in conftest.py - for users running tests interactively # in e.g. IPython, conftest.py would get read in too late, so we need # to do it here - but at the same time the code here doesn't work when # running tests in parallel mode because this uses subprocesses which # don't know about the temporary config/cache. astropy_config = tempfile.mkdtemp("astropy_config") astropy_cache = tempfile.mkdtemp("astropy_cache") # Have to use nested with statements for cross-Python support # Note, using these context managers here is superfluous if the # config_dir or cache_dir options to pytest are in use, but it's # also harmless to nest the contexts with set_temp_config(astropy_config, delete=True): with set_temp_cache(astropy_cache, delete=True): return pytest.main(args=args, plugins=plugins) @classmethod def make_test_runner_in(cls, path): """ Constructs a `TestRunner` to run in the given path, and returns a ``test()`` function which takes the same arguments as ``TestRunner.run_tests``. The returned ``test()`` function will be defined in the module this was called from. This is used to implement the ``astropy.test()`` function (or the equivalent for affiliated packages). """ runner = cls(path) @wraps(runner.run_tests, ("__doc__",)) def test(**kwargs): return runner.run_tests(**kwargs) module = find_current_module(2) if module is not None: test.__module__ = module.__name__ # A somewhat unusual hack, but delete the attached __wrapped__ # attribute--although this is normally used to tell if the function # was wrapped with wraps, on some version of Python this is also # used to determine the signature to display in help() which is # not useful in this case. We don't really care in this case if the # function was wrapped either if hasattr(test, "__wrapped__"): del test.__wrapped__ test.__test__ = False return test class TestRunner(TestRunnerBase): """ A test runner for astropy tests """ def packages_path(self, packages, base_path, error=None, warning=None): """ Generates the path for multiple packages. Parameters ---------- packages : str Comma separated string of packages. base_path : str Base path to the source code or documentation. error : str Error message to be raised as ``ValueError``. Individual package name and path can be accessed by ``{name}`` and ``{path}`` respectively. No error is raised if `None`. (Default: `None`) warning : str Warning message to be issued. Individual package name and path can be accessed by ``{name}`` and ``{path}`` respectively. No warning is issues if `None`. (Default: `None`) Returns ------- paths : list of str List of strings of existing package paths. 
""" packages = packages.split(",") paths = [] for package in packages: path = os.path.join(base_path, package.replace(".", os.path.sep)) if not os.path.isdir(path): info = {"name": package, "path": path} if error is not None: raise ValueError(error.format(**info)) if warning is not None: warnings.warn(warning.format(**info)) else: paths.append(path) return paths # Increase priority so this warning is displayed first. @keyword(priority=1000) def coverage(self, coverage, kwargs): if coverage: warnings.warn( "The coverage option is ignored on run_tests, since it " "can not be made to work in that context. Use " "'python setup.py test --coverage' instead.", AstropyWarning, ) return [] # test_path depends on self.package_path so make sure this runs before # test_path. @keyword(priority=1) def package(self, package, kwargs): """ package : str, optional The name of a specific package to test, e.g. 'io.fits' or 'utils'. Accepts comma separated string to specify multiple packages. If nothing is specified all default tests are run. """ if package is None: self.package_path = [self.base_path] else: error_message = "package to test is not found: {name} (at path {path})." self.package_path = self.packages_path( package, self.base_path, error=error_message ) if not kwargs["test_path"]: return self.package_path return [] @keyword() def test_path(self, test_path, kwargs): """ test_path : str, optional Specify location to test by path. May be a single file or directory. Must be specified absolutely or relative to the calling directory. """ all_args = [] # Ensure that the package kwarg has been run. self.package(kwargs["package"], kwargs) if test_path: base, ext = os.path.splitext(test_path) if ext in (".rst", ""): if kwargs["docs_path"] is None: # This shouldn't happen from "python setup.py test" raise ValueError( "Can not test .rst files without a docs_path specified." ) abs_docs_path = os.path.abspath(kwargs["docs_path"]) abs_test_path = os.path.abspath( os.path.join(abs_docs_path, os.pardir, test_path) ) common = os.path.commonprefix((abs_docs_path, abs_test_path)) if os.path.exists(abs_test_path) and common == abs_docs_path: # Turn on the doctest_rst plugin all_args.append("--doctest-rst") test_path = abs_test_path # Check that the extensions are in the path and not at the end to # support specifying the name of the test, i.e. # test_quantity.py::test_unit if not ( os.path.isdir(test_path) or (".py" in test_path or ".rst" in test_path) ): raise ValueError( "Test path must be a directory or a path to a .py or .rst file" ) return all_args + [test_path] return [] @keyword() def args(self, args, kwargs): """ args : str, optional Additional arguments to be passed to ``pytest.main`` in the ``args`` keyword argument. """ if args: return shlex.split(args, posix=not sys.platform.startswith("win")) return [] @keyword(default_value=[]) def plugins(self, plugins, kwargs): """ plugins : list, optional Plugins to be passed to ``pytest.main`` in the ``plugins`` keyword argument. """ # Plugins are handled independently by `run_tests` so we define this # keyword just for the docstring return [] @keyword() def verbose(self, verbose, kwargs): """ verbose : bool, optional Convenience option to turn on verbose output from pytest. Passing True is the same as specifying ``-v`` in ``args``. """ if verbose: return ["-v"] return [] @keyword() def pastebin(self, pastebin, kwargs): """ pastebin : ('failed', 'all', None), optional Convenience option for turning on pytest pastebin output. 
            Set to 'failed' to upload info for failed tests, or 'all' to upload
            info for all tests.
        """
        if pastebin is not None:
            if pastebin in ["failed", "all"]:
                return [f"--pastebin={pastebin}"]
            else:
                raise ValueError("pastebin should be 'failed' or 'all'")

        return []

    @keyword(default_value="none")
    def remote_data(self, remote_data, kwargs):
        """
        remote_data : {'none', 'astropy', 'any'}, optional
            Controls whether to run tests marked with @pytest.mark.remote_data.
            This can be set to run no tests with remote data (``none``), only
            ones that use data from http://data.astropy.org (``astropy``), or
            all tests that use remote data (``any``). The default is ``none``.
        """
        if remote_data is True:
            remote_data = "any"
        elif remote_data is False:
            remote_data = "none"
        elif remote_data not in ("none", "astropy", "any"):
            warnings.warn(
                "The remote_data option should be one of "
                f"none/astropy/any (found {remote_data}). For backward-compatibility, "
                "assuming 'any', but you should change the option to be "
                "one of the supported ones to avoid issues in "
                "the future.",
                AstropyDeprecationWarning,
            )
            remote_data = "any"

        return [f"--remote-data={remote_data}"]

    @keyword()
    def pep8(self, pep8, kwargs):
        """
        pep8 : bool, optional
            Turn on PEP8 checking via the pytest-pep8 plugin and disable normal
            tests. Same as specifying ``--pep8 -k pep8`` in ``args``.
        """
        if pep8:
            try:
                import pytest_pep8  # noqa: F401
            except ImportError:
                raise ImportError(
                    "PEP8 checking requires pytest-pep8 plugin: "
                    "https://pypi.org/project/pytest-pep8"
                )
            else:
                return ["--pep8", "-k", "pep8"]

        return []

    @keyword()
    def pdb(self, pdb, kwargs):
        """
        pdb : bool, optional
            Turn on PDB post-mortem analysis for failing tests. Same as
            specifying ``--pdb`` in ``args``.
        """
        if pdb:
            return ["--pdb"]
        return []

    @keyword()
    def open_files(self, open_files, kwargs):
        """
        open_files : bool, optional
            Fail when any tests leave files open. Off by default, because
            this adds extra run time to the test suite. Requires the
            ``psutil`` package.
        """
        if open_files:
            if kwargs["parallel"] != 0:
                raise SystemError(
                    "open file detection may not be used in conjunction with "
                    "parallel testing."
                )

            try:
                import psutil  # noqa: F401
            except ImportError:
                raise SystemError(
                    "open file detection requested, but psutil package "
                    "is not installed."
                )

            print("Checking for unclosed files")
            return ["--open-files"]

        return []

    @keyword(0)
    def parallel(self, parallel, kwargs):
        """
        parallel : int or 'auto', optional
            When provided, run the tests in parallel on the specified
            number of CPUs. If parallel is ``'auto'``, it will use all the
            cores on the machine. Requires the ``pytest-xdist`` plugin.
        """
        if parallel != 0:
            try:
                from xdist import plugin  # noqa: F401
            except ImportError:
                raise SystemError(
                    "running tests in parallel requires the pytest-xdist package"
                )

            return ["-n", str(parallel)]

        return []

    @keyword()
    def docs_path(self, docs_path, kwargs):
        """
        docs_path : str, optional
            The path to the documentation .rst files.
        """
        paths = []
        if docs_path is not None and not kwargs["skip_docs"]:
            if kwargs["package"] is not None:
                warning_message = (
                    "Can not test .rst docs for {name}, since "
                    "docs path ({path}) does not exist."
                )
                paths = self.packages_path(
                    kwargs["package"], docs_path, warning=warning_message
                )
            elif not kwargs["test_path"]:
                paths = [docs_path]

            if len(paths) and not kwargs["test_path"]:
                paths.append("--doctest-rst")

        return paths

    @keyword()
    def skip_docs(self, skip_docs, kwargs):
        """
        skip_docs : `bool`, optional
            When `True`, skips running the doctests in the .rst files.
""" # Skip docs is a bool used by docs_path only. return [] @keyword() def repeat(self, repeat, kwargs): """ repeat : `int`, optional If set, specifies how many times each test should be run. This is useful for diagnosing sporadic failures. """ if repeat: return [f"--repeat={repeat}"] return [] # Override run_tests for astropy-specific fixes def run_tests(self, **kwargs): # This prevents cyclical import problems that make it # impossible to test packages that define Table types on their # own. from astropy.table import Table # noqa: F401 return super().run_tests(**kwargs)
07a5f9ea31713c3d33e509cf146513a095808562ec132a8c6e021ec1f4f3c87a
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module provides the tools used to internally run the astropy test suite from the installed astropy. It makes use of the `pytest`_ testing framework. """ import functools import inspect import os import pickle import sys import warnings import pytest from astropy.units import allclose as quantity_allclose # noqa: F401 from astropy.utils.compat import PYTHON_LT_3_11 from astropy.utils.decorators import deprecated from astropy.utils.exceptions import ( AstropyDeprecationWarning, AstropyPendingDeprecationWarning, ) # For backward-compatibility with affiliated packages from .runner import TestRunner # noqa: F401 __all__ = [ "assert_follows_unicode_guidelines", "assert_quantity_allclose", "check_pickling_recovery", "pickle_protocol", "generic_recursive_equality_test", ] def _save_coverage(cov, result, rootdir, testing_path): """ This method is called after the tests have been run in coverage mode to cleanup and then save the coverage data and report. """ from astropy.utils.console import color_print if result != 0: return # The coverage report includes the full path to the temporary # directory, so we replace all the paths with the true source # path. Note that this will not work properly for packages that still # rely on 2to3. try: # Coverage 4.0: _harvest_data has been renamed to get_data, the # lines dict is private cov.get_data() except AttributeError: # Coverage < 4.0 cov._harvest_data() lines = cov.data.lines else: lines = cov.data._lines for key in list(lines.keys()): new_path = os.path.relpath( os.path.realpath(key), os.path.realpath(testing_path) ) new_path = os.path.abspath(os.path.join(rootdir, new_path)) lines[new_path] = lines.pop(key) color_print("Saving coverage data in .coverage...", "green") cov.save() color_print("Saving HTML coverage report in htmlcov...", "green") cov.html_report(directory=os.path.join(rootdir, "htmlcov")) @deprecated("5.1", alternative="pytest.raises") class raises: """ A decorator to mark that a test should raise a given exception. Use as follows:: @raises(ZeroDivisionError) def test_foo(): x = 1/0 This can also be used a context manager, in which case it is just an alias for the ``pytest.raises`` context manager (because the two have the same name this help avoid confusion by being flexible). .. note:: Usage of ``pytest.raises`` is preferred. """ # pep-8 naming exception -- this is a decorator class def __init__(self, exc): self._exc = exc self._ctx = None def __call__(self, func): @functools.wraps(func) def run_raises_test(*args, **kwargs): pytest.raises(self._exc, func, *args, **kwargs) return run_raises_test def __enter__(self): self._ctx = pytest.raises(self._exc) return self._ctx.__enter__() def __exit__(self, *exc_info): return self._ctx.__exit__(*exc_info) # TODO: Remove these when deprecation period of things deprecated in PR 12633 are removed. _deprecations_as_exceptions = False _include_astropy_deprecations = True _modules_to_ignore_on_import = { r"compiler", # A deprecated stdlib module used by pytest r"scipy", r"pygments", r"ipykernel", r"IPython", # deprecation warnings for async and await r"setuptools", } _warnings_to_ignore_entire_module = set() _warnings_to_ignore_by_pyver = { None: { # Python version agnostic # https://github.com/astropy/astropy/pull/7372 ( r"Importing from numpy\.testing\.decorators is deprecated, " r"import from numpy\.testing instead\.", DeprecationWarning, ), # inspect raises this slightly different warning on Python 3.7. # Keeping it since e.g. 
lxml as of 3.8.0 is still calling getargspec() ( r"inspect\.getargspec\(\) is deprecated, use " r"inspect\.signature\(\) or inspect\.getfullargspec\(\)", DeprecationWarning, ), # https://github.com/astropy/pytest-doctestplus/issues/29 (r"split\(\) requires a non-empty pattern match", FutureWarning), # Package resolution warning that we can do nothing about ( r"can't resolve package from __spec__ or __package__, " r"falling back on __name__ and __path__", ImportWarning, ), }, (3, 7): { # Deprecation warning for collections.abc, fixed in Astropy but still # used in lxml, and maybe others (r"Using or importing the ABCs from 'collections'", DeprecationWarning) }, } @deprecated("5.1", alternative="https://docs.pytest.org/en/stable/warnings.html") def enable_deprecations_as_exceptions( include_astropy_deprecations=True, modules_to_ignore_on_import=[], warnings_to_ignore_entire_module=[], warnings_to_ignore_by_pyver={}, ): """ Turn on the feature that turns deprecations into exceptions. Parameters ---------- include_astropy_deprecations : bool If set to `True`, ``AstropyDeprecationWarning`` and ``AstropyPendingDeprecationWarning`` are also turned into exceptions. modules_to_ignore_on_import : list of str List of additional modules that generate deprecation warnings on import, which are to be ignored. By default, these are already included: ``compiler``, ``scipy``, ``pygments``, ``ipykernel``, and ``setuptools``. warnings_to_ignore_entire_module : list of str List of modules with deprecation warnings to ignore completely, not just during import. If ``include_astropy_deprecations=True`` is given, ``AstropyDeprecationWarning`` and ``AstropyPendingDeprecationWarning`` are also ignored for the modules. warnings_to_ignore_by_pyver : dict Dictionary mapping tuple of ``(major, minor)`` Python version to a list of ``(warning_message, warning_class)`` to ignore. Python version-agnostic warnings should be mapped to `None` key. This is in addition of those already ignored by default (see ``_warnings_to_ignore_by_pyver`` values). """ global _deprecations_as_exceptions _deprecations_as_exceptions = True global _include_astropy_deprecations _include_astropy_deprecations = include_astropy_deprecations global _modules_to_ignore_on_import _modules_to_ignore_on_import.update(modules_to_ignore_on_import) global _warnings_to_ignore_entire_module _warnings_to_ignore_entire_module.update(warnings_to_ignore_entire_module) global _warnings_to_ignore_by_pyver for key, val in warnings_to_ignore_by_pyver.items(): if key in _warnings_to_ignore_by_pyver: _warnings_to_ignore_by_pyver[key].update(val) else: _warnings_to_ignore_by_pyver[key] = set(val) @deprecated("5.1", alternative="https://docs.pytest.org/en/stable/warnings.html") def treat_deprecations_as_exceptions(): """ Turn all DeprecationWarnings (which indicate deprecated uses of Python itself or Numpy, but not within Astropy, where we use our own deprecation warning class) into exceptions so that we find out about them early. This completely resets the warning filters and any "already seen" warning state. """ # First, totally reset the warning state. The modules may change during # this iteration thus we copy the original state to a list to iterate # on. See https://github.com/astropy/astropy/pull/5513. 
for module in list(sys.modules.values()): try: del module.__warningregistry__ except Exception: pass if not _deprecations_as_exceptions: return warnings.resetwarnings() # Hide the next couple of DeprecationWarnings warnings.simplefilter("ignore", DeprecationWarning) # Here's the wrinkle: a couple of our third-party dependencies # (pytest and scipy) are still using deprecated features # themselves, and we'd like to ignore those. Fortunately, those # show up only at import time, so if we import those things *now*, # before we turn the warnings into exceptions, we're golden. for m in _modules_to_ignore_on_import: try: __import__(m) except ImportError: pass # Now, start over again with the warning filters warnings.resetwarnings() # Now, turn these warnings into exceptions _all_warns = [DeprecationWarning, FutureWarning, ImportWarning] # Only turn astropy deprecation warnings into exceptions if requested if _include_astropy_deprecations: _all_warns += [AstropyDeprecationWarning, AstropyPendingDeprecationWarning] for w in _all_warns: warnings.filterwarnings("error", ".*", w) # This ignores all specified warnings from given module(s), # not just on import, for use of Astropy affiliated packages. for m in _warnings_to_ignore_entire_module: for w in _all_warns: warnings.filterwarnings("ignore", category=w, module=m) # This ignores only specified warnings by Python version, if applicable. for v in _warnings_to_ignore_by_pyver: if v is None or sys.version_info[:2] == v: for s in _warnings_to_ignore_by_pyver[v]: warnings.filterwarnings("ignore", s[0], s[1]) @deprecated("5.1", alternative="pytest.warns") class catch_warnings(warnings.catch_warnings): """ A high-powered version of warnings.catch_warnings to use for testing and to make sure that there is no dependence on the order in which the tests are run. This completely blitzes any memory of any warnings that have appeared before so that all warnings will be caught and displayed. ``*args`` is a set of warning classes to collect. If no arguments are provided, all warnings are collected. Use as follows:: with catch_warnings(MyCustomWarning) as w: do.something.bad() assert len(w) > 0 .. note:: Usage of :ref:`pytest.warns <pytest:warns>` is preferred. """ def __init__(self, *classes): super().__init__(record=True) self.classes = classes def __enter__(self): warning_list = super().__enter__() treat_deprecations_as_exceptions() if len(self.classes) == 0: warnings.simplefilter("always") else: warnings.simplefilter("ignore") for cls in self.classes: warnings.simplefilter("always", cls) return warning_list def __exit__(self, type, value, traceback): treat_deprecations_as_exceptions() @deprecated("5.1", alternative="pytest.mark.filterwarnings") class ignore_warnings(catch_warnings): """ This can be used either as a context manager or function decorator to ignore all warnings that occur within a function or block of code. An optional category option can be supplied to only ignore warnings of a certain category or categories (if a list is provided). 
""" def __init__(self, category=None): super().__init__() if isinstance(category, type) and issubclass(category, Warning): self.category = [category] else: self.category = category def __call__(self, func): @functools.wraps(func) def wrapper(*args, **kwargs): # Originally this just reused self, but that doesn't work if the # function is called more than once so we need to make a new # context manager instance for each call with self.__class__(category=self.category): return func(*args, **kwargs) return wrapper def __enter__(self): retval = super().__enter__() if self.category is not None: for category in self.category: warnings.simplefilter("ignore", category) else: warnings.simplefilter("ignore") return retval def assert_follows_unicode_guidelines(x, roundtrip=None): """ Test that an object follows our Unicode policy. See "Unicode guidelines" in the coding guidelines. Parameters ---------- x : object The instance to test roundtrip : module, optional When provided, this namespace will be used to evaluate ``repr(x)`` and ensure that it roundtrips. It will also ensure that ``__bytes__(x)`` roundtrip. If not provided, no roundtrip testing will be performed. """ from astropy import conf with conf.set_temp("unicode_output", False): bytes_x = bytes(x) unicode_x = str(x) repr_x = repr(x) assert isinstance(bytes_x, bytes) bytes_x.decode("ascii") assert isinstance(unicode_x, str) unicode_x.encode("ascii") assert isinstance(repr_x, str) if isinstance(repr_x, bytes): repr_x.decode("ascii") else: repr_x.encode("ascii") if roundtrip is not None: assert x.__class__(bytes_x) == x assert x.__class__(unicode_x) == x assert eval(repr_x, roundtrip) == x with conf.set_temp("unicode_output", True): bytes_x = bytes(x) unicode_x = str(x) repr_x = repr(x) assert isinstance(bytes_x, bytes) bytes_x.decode("ascii") assert isinstance(unicode_x, str) assert isinstance(repr_x, str) if isinstance(repr_x, bytes): repr_x.decode("ascii") else: repr_x.encode("ascii") if roundtrip is not None: assert x.__class__(bytes_x) == x assert x.__class__(unicode_x) == x assert eval(repr_x, roundtrip) == x @pytest.fixture(params=[0, 1, -1]) def pickle_protocol(request): """ Fixture to run all the tests for protocols 0 and 1, and -1 (most advanced). (Originally from astropy.table.tests.test_pickle) """ return request.param def generic_recursive_equality_test(a, b, class_history): """ Check if the attributes of a and b are equal. Then, check if the attributes of the attributes are equal. """ if PYTHON_LT_3_11: dict_a = a.__getstate__() if hasattr(a, "__getstate__") else a.__dict__ else: # NOTE: The call may need to be adapted if other objects implementing a __getstate__ # with required argument(s) are passed to this function. # For a class with `__slots__` the default state is not a `dict`; # with neither `__dict__` nor `__slots__` it is `None`. state = a.__getstate__(a) if inspect.isclass(a) else a.__getstate__() dict_a = state if isinstance(state, dict) else getattr(a, "__dict__", dict()) dict_b = b.__dict__ for key in dict_a: assert key in dict_b, f"Did not pickle {key}" if dict_a[key].__class__.__eq__ is not object.__eq__: # Only compare if the class defines a proper equality test. # E.g., info does not define __eq__, and hence defers to # object.__eq__, which is equivalent to checking that two # instances are the same. This will generally not be true # after pickling. 
eq = dict_a[key] == dict_b[key] if "__iter__" in dir(eq): eq = False not in eq assert eq, f"Value of {key} changed by pickling" if hasattr(dict_a[key], "__dict__"): if dict_a[key].__class__ in class_history: # attempt to prevent infinite recursion pass else: new_class_history = [dict_a[key].__class__] new_class_history.extend(class_history) generic_recursive_equality_test( dict_a[key], dict_b[key], new_class_history ) def check_pickling_recovery(original, protocol): """ Try to pickle an object. If successful, make sure the object's attributes survived pickling and unpickling. """ f = pickle.dumps(original, protocol=protocol) unpickled = pickle.loads(f) class_history = [original.__class__] generic_recursive_equality_test(original, unpickled, class_history) def assert_quantity_allclose(actual, desired, rtol=1.0e-7, atol=None, **kwargs): """ Raise an assertion if two objects are not equal up to desired tolerance. This is a :class:`~astropy.units.Quantity`-aware version of :func:`numpy.testing.assert_allclose`. """ import numpy as np from astropy.units.quantity import _unquantify_allclose_arguments np.testing.assert_allclose( *_unquantify_allclose_arguments(actual, desired, rtol, atol), **kwargs )
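

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the public API): the
# ``_Point`` class below is a hypothetical stand-in for any picklable object.
# It demonstrates ``check_pickling_recovery`` and ``assert_quantity_allclose``.
if __name__ == "__main__":
    import astropy.units as u

    class _Point:
        def __init__(self, x, y):
            self.x = x
            self.y = y

    # Check that both attributes survive a pickle round-trip at the highest
    # available protocol (one of the values the pickle_protocol fixture uses).
    check_pickling_recovery(_Point(1.0, 2.0), -1)

    # Quantity-aware closeness check: 100 cm equals 1 m within tolerance.
    assert_quantity_allclose(100 * u.cm, 1 * u.m)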
21f4b9d26fe94e1c7f21a16ea8fa317ff5aa481e4dd44f66406229351c8e6d87
# Licensed under a 3-clause BSD style license - see LICENSE.rst

from copy import deepcopy

import numpy as np

from astropy import units as u
from astropy.table import QTable, Table, groups
from astropy.time import Time, TimeDelta
from astropy.timeseries.core import BaseTimeSeries, autocheck_required_columns
from astropy.units import Quantity, UnitsError
from astropy.utils.decorators import deprecated_renamed_argument

__all__ = ["TimeSeries"]


@autocheck_required_columns
class TimeSeries(BaseTimeSeries):
    """
    A class to represent time series data in tabular form.

    `~astropy.timeseries.TimeSeries` provides a class for representing time
    series as a collection of values of different quantities measured at
    specific points in time (for time series with finite time bins, see the
    `~astropy.timeseries.BinnedTimeSeries` class).
    `~astropy.timeseries.TimeSeries` is a sub-class of `~astropy.table.QTable`
    and thus provides all the standard table manipulation methods available to
    tables, but it also provides additional conveniences for dealing with time
    series, such as a flexible initializer for setting up the times, a method
    for folding time series, and a ``time`` attribute for easy access to the
    time values.

    See also: https://docs.astropy.org/en/stable/timeseries/

    Parameters
    ----------
    data : numpy ndarray, dict, list, `~astropy.table.Table`, or table-like object, optional
        Data to initialize time series. This does not need to contain the
        times, which can be provided separately, but if it does contain the
        times they should be in a column called ``'time'`` to be automatically
        recognized.
    time : `~astropy.time.Time`, `~astropy.time.TimeDelta` or iterable
        The times at which the values are sampled - this can be either given
        directly as a `~astropy.time.Time` or `~astropy.time.TimeDelta` array
        or as any iterable that initializes the `~astropy.time.Time` class. If
        this is given, then the remaining time-related arguments should not be
        used.
    time_start : `~astropy.time.Time` or str
        The time of the first sample in the time series. This is an
        alternative to providing ``time`` and requires that ``time_delta`` is
        also provided.
    time_delta : `~astropy.time.TimeDelta` or `~astropy.units.Quantity` ['time']
        The step size in time for the series. This can either be a scalar if
        the time series is evenly sampled, or an array of values if it is not.
    n_samples : int
        The number of time samples for the series. This is only used if both
        ``time_start`` and ``time_delta`` are provided and are scalar values.
    **kwargs : dict, optional
        Additional keyword arguments are passed to `~astropy.table.QTable`.
    """

    _required_columns = ["time"]

    def __init__(
        self,
        data=None,
        *,
        time=None,
        time_start=None,
        time_delta=None,
        n_samples=None,
        **kwargs,
    ):
        super().__init__(data=data, **kwargs)

        # For some operations, an empty time series needs to be created, then
        # columns added one by one. We should check that when columns are added
        # manually, time is added first and is of the right type.
        if data is None and time is None and time_start is None and time_delta is None:
            self._required_columns_relax = True
            return

        # First if time has been given in the table data, we should extract it
        # and treat it as if it had been passed as a keyword argument.

        if data is not None:
            if n_samples is not None:
                if n_samples != len(self):
                    raise TypeError(
                        "'n_samples' was given but does not match the "
                        "length of the input data."
) else: n_samples = len(self) if "time" in self.colnames: if time is None: time = self.columns["time"] else: raise TypeError( "'time' has been given both in the table and as a keyword argument" ) if time is None and time_start is None: raise TypeError("Either 'time' or 'time_start' should be specified") elif time is not None and time_start is not None: raise TypeError("Cannot specify both 'time' and 'time_start'") if time is not None and not isinstance(time, (Time, TimeDelta)): time = Time(time) if time_start is not None and not isinstance(time_start, (Time, TimeDelta)): time_start = Time(time_start) if time_delta is not None and not isinstance(time_delta, (Quantity, TimeDelta)): raise TypeError("'time_delta' should be a Quantity or a TimeDelta") if isinstance(time_delta, TimeDelta): time_delta = time_delta.sec * u.s if time_start is not None: # We interpret this as meaning that time is that of the first # sample and that the interval is given by time_delta. if time_delta is None: raise TypeError("'time' is scalar, so 'time_delta' is required") if time_delta.isscalar: time_delta = np.repeat(time_delta, n_samples) time_delta = np.cumsum(time_delta) time_delta = np.roll(time_delta, 1) time_delta[0] = 0.0 * u.s time = time_start + time_delta elif len(self.colnames) > 0 and len(time) != len(self): raise ValueError( f"Length of 'time' ({len(time)}) should match data length ({n_samples})" ) elif time_delta is not None: raise TypeError( "'time_delta' should not be specified since 'time' is an array" ) with self._delay_required_column_checks(): if "time" in self.colnames: self.remove_column("time") self.add_column(time, index=0, name="time") @property def time(self): """ The time values. """ return self["time"] @deprecated_renamed_argument("midpoint_epoch", "epoch_time", "4.0") def fold( self, period=None, epoch_time=None, epoch_phase=0, wrap_phase=None, normalize_phase=False, ): """ Return a new `~astropy.timeseries.TimeSeries` folded with a period and epoch. Parameters ---------- period : `~astropy.units.Quantity` ['time'] The period to use for folding epoch_time : `~astropy.time.Time` The time to use as the reference epoch, at which the relative time offset / phase will be ``epoch_phase``. Defaults to the first time in the time series. epoch_phase : float or `~astropy.units.Quantity` ['dimensionless', 'time'] Phase of ``epoch_time``. If ``normalize_phase`` is `True`, this should be a dimensionless value, while if ``normalize_phase`` is ``False``, this should be a `~astropy.units.Quantity` with time units. Defaults to 0. wrap_phase : float or `~astropy.units.Quantity` ['dimensionless', 'time'] The value of the phase above which values are wrapped back by one period. If ``normalize_phase`` is `True`, this should be a dimensionless value, while if ``normalize_phase`` is ``False``, this should be a `~astropy.units.Quantity` with time units. Defaults to half the period, so that the resulting time series goes from ``-period / 2`` to ``period / 2`` (if ``normalize_phase`` is `False`) or -0.5 to 0.5 (if ``normalize_phase`` is `True`). normalize_phase : bool If `False` phase is returned as `~astropy.time.TimeDelta`, otherwise as a dimensionless `~astropy.units.Quantity`. Returns ------- folded_timeseries : `~astropy.timeseries.TimeSeries` The folded time series object with phase as the ``time`` column. 
""" if not isinstance(period, Quantity) or period.unit.physical_type != "time": raise UnitsError("period should be a Quantity in units of time") folded = self.copy() if epoch_time is None: epoch_time = self.time[0] else: epoch_time = Time(epoch_time) period_sec = period.to_value(u.s) if normalize_phase: if ( isinstance(epoch_phase, Quantity) and epoch_phase.unit.physical_type != "dimensionless" ): raise UnitsError( "epoch_phase should be a dimensionless Quantity " "or a float when normalize_phase=True" ) epoch_phase_sec = epoch_phase * period_sec else: if epoch_phase == 0: epoch_phase_sec = 0.0 else: if ( not isinstance(epoch_phase, Quantity) or epoch_phase.unit.physical_type != "time" ): raise UnitsError( "epoch_phase should be a Quantity in units " "of time when normalize_phase=False" ) epoch_phase_sec = epoch_phase.to_value(u.s) if wrap_phase is None: wrap_phase = period_sec / 2 else: if normalize_phase: if isinstance( wrap_phase, Quantity ) and not wrap_phase.unit.is_equivalent(u.one): raise UnitsError( "wrap_phase should be dimensionless when normalize_phase=True" ) else: if wrap_phase < 0 or wrap_phase > 1: raise ValueError("wrap_phase should be between 0 and 1") else: wrap_phase = wrap_phase * period_sec else: if ( isinstance(wrap_phase, Quantity) and wrap_phase.unit.physical_type == "time" ): if wrap_phase < 0 or wrap_phase > period: raise ValueError( "wrap_phase should be between 0 and the period" ) else: wrap_phase = wrap_phase.to_value(u.s) else: raise UnitsError( "wrap_phase should be a Quantity in units " "of time when normalize_phase=False" ) relative_time_sec = ( (self.time - epoch_time).sec + epoch_phase_sec + (period_sec - wrap_phase) ) % period_sec - (period_sec - wrap_phase) folded_time = TimeDelta(relative_time_sec * u.s) if normalize_phase: folded_time = (folded_time / period).decompose() period = period_sec = 1 with folded._delay_required_column_checks(): folded.remove_column("time") folded.add_column(folded_time, name="time", index=0) return folded def __getitem__(self, item): if self._is_list_or_tuple_of_str(item): if "time" not in item: out = QTable( [self[x] for x in item], meta=deepcopy(self.meta), copy_indices=self._copy_indices, ) out._groups = groups.TableGroups( out, indices=self.groups._indices, keys=self.groups._keys ) return out return super().__getitem__(item) def add_column(self, *args, **kwargs): """ See :meth:`~astropy.table.Table.add_column`. """ # Note that the docstring is inherited from QTable result = super().add_column(*args, **kwargs) if len(self.indices) == 0 and "time" in self.colnames: self.add_index("time") return result def add_columns(self, *args, **kwargs): """ See :meth:`~astropy.table.Table.add_columns`. """ # Note that the docstring is inherited from QTable result = super().add_columns(*args, **kwargs) if len(self.indices) == 0 and "time" in self.colnames: self.add_index("time") return result @classmethod def from_pandas(self, df, time_scale="utc"): """ Convert a :class:`~pandas.DataFrame` to a :class:`astropy.timeseries.TimeSeries`. Parameters ---------- df : :class:`pandas.DataFrame` A pandas :class:`pandas.DataFrame` instance. time_scale : str The time scale to pass into `astropy.time.Time`. Defaults to ``UTC``. 
""" from pandas import DataFrame, DatetimeIndex if not isinstance(df, DataFrame): raise TypeError("Input should be a pandas DataFrame") if not isinstance(df.index, DatetimeIndex): raise TypeError("DataFrame does not have a DatetimeIndex") time = Time(df.index, scale=time_scale) table = Table.from_pandas(df) return TimeSeries(time=time, data=table) def to_pandas(self): """ Convert this :class:`~astropy.timeseries.TimeSeries` to a :class:`~pandas.DataFrame` with a :class:`~pandas.DatetimeIndex` index. Returns ------- dataframe : :class:`pandas.DataFrame` A pandas :class:`pandas.DataFrame` instance """ return Table(self).to_pandas(index="time") @classmethod def read( self, filename, time_column=None, time_format=None, time_scale=None, format=None, *args, **kwargs, ): """ Read and parse a file and returns a `astropy.timeseries.TimeSeries`. This method uses the unified I/O infrastructure in Astropy which makes it easy to define readers/writers for various classes (https://docs.astropy.org/en/stable/io/unified.html). By default, this method will try and use readers defined specifically for the `astropy.timeseries.TimeSeries` class - however, it is also possible to use the ``format`` keyword to specify formats defined for the `astropy.table.Table` class - in this case, you will need to also provide the column names for column containing the start times for the bins, as well as other column names (see the Parameters section below for details):: >>> from astropy.timeseries import TimeSeries >>> ts = TimeSeries.read('sampled.dat', format='ascii.ecsv', ... time_column='date') # doctest: +SKIP Parameters ---------- filename : str File to parse. format : str File format specifier. time_column : str, optional The name of the time column. time_format : str, optional The time format for the time column. time_scale : str, optional The time scale for the time column. *args : tuple, optional Positional arguments passed through to the data reader. **kwargs : dict, optional Keyword arguments passed through to the data reader. Returns ------- out : `astropy.timeseries.sampled.TimeSeries` TimeSeries corresponding to file contents. Notes ----- """ try: # First we try the readers defined for the BinnedTimeSeries class return super().read(filename, format=format, *args, **kwargs) except TypeError: # Otherwise we fall back to the default Table readers if time_column is None: raise ValueError( "``time_column`` should be provided since the default Table readers" " are being used." ) table = Table.read(filename, format=format, *args, **kwargs) if time_column in table.colnames: time = Time( table.columns[time_column], scale=time_scale, format=time_format ) table.remove_column(time_column) else: raise ValueError( f"Time column '{time_column}' not found in the input data." ) return TimeSeries(time=time, data=table)
4cac07b66bb678e70e7b9021bd16c63ce9753d4dd258b8eebb971a11d154b44b
# Licensed under a 3-clause BSD style license - see LICENSE.rst import warnings import numpy as np from astropy import units as u from astropy.time import Time, TimeDelta from astropy.timeseries.binned import BinnedTimeSeries from astropy.timeseries.sampled import TimeSeries from astropy.utils.exceptions import AstropyUserWarning __all__ = ["aggregate_downsample"] def reduceat(array, indices, function): """ Manual reduceat functionality for cases where Numpy functions don't have a reduceat. It will check if the input function has a reduceat and call that if it does. """ if len(indices) == 0: return np.array([]) elif hasattr(function, "reduceat"): return np.array(function.reduceat(array, indices)) else: result = [] for i in range(len(indices) - 1): if indices[i + 1] <= indices[i] + 1: result.append(function(array[indices[i]])) else: result.append(function(array[indices[i] : indices[i + 1]])) result.append(function(array[indices[-1] :])) return np.array(result) def _to_relative_longdouble(time: Time, rel_base: Time) -> np.longdouble: # Convert the time objects into plain ndarray # so that they be used to make various operations faster, including # - `np.searchsorted()` # - time comparison. # # Relative time in seconds with np.longdouble type is used to: # - a consistent format for search, irrespective of the format/scale of the inputs, # - retain the best precision possible return (time - rel_base).to_value(format="sec", subfmt="long") def aggregate_downsample( time_series, *, time_bin_size=None, time_bin_start=None, time_bin_end=None, n_bins=None, aggregate_func=None ): """ Downsample a time series by binning values into bins with a fixed size or custom sizes, using a single function to combine the values in the bin. Parameters ---------- time_series : :class:`~astropy.timeseries.TimeSeries` The time series to downsample. time_bin_size : `~astropy.units.Quantity` or `~astropy.time.TimeDelta` ['time'], optional The time interval for the binned time series - this is either a scalar value (in which case all time bins will be assumed to have the same duration) or as an array of values (in which case each time bin can have a different duration). If this argument is provided, ``time_bin_end`` should not be provided. time_bin_start : `~astropy.time.Time` or iterable, optional The start time for the binned time series - this can be either given directly as a `~astropy.time.Time` array or as any iterable that initializes the `~astropy.time.Time` class. This can also be a scalar value if ``time_bin_size`` or ``time_bin_end`` is provided. Defaults to the first time in the sampled time series. time_bin_end : `~astropy.time.Time` or iterable, optional The times of the end of each bin - this can be either given directly as a `~astropy.time.Time` array or as any iterable that initializes the `~astropy.time.Time` class. This can only be given if ``time_bin_start`` is provided or its default is used. If ``time_bin_end`` is scalar and ``time_bin_start`` is an array, time bins are assumed to be contiguous; the end of each bin is the start of the next one, and ``time_bin_end`` gives the end time for the last bin. If ``time_bin_end`` is an array and ``time_bin_start`` is scalar, bins will be contiguous. If both ``time_bin_end`` and ``time_bin_start`` are arrays, bins do not need to be contiguous. If this argument is provided, ``time_bin_size`` should not be provided. n_bins : int, optional The number of bins to use. Defaults to the number needed to fit all the original points. 
If both ``time_bin_start`` and ``time_bin_size`` are provided and are scalar values, this determines the total bins within that interval. If ``time_bin_start`` is an iterable, this parameter will be ignored. aggregate_func : callable, optional The function to use for combining points in the same bin. Defaults to np.nanmean. Returns ------- binned_time_series : :class:`~astropy.timeseries.BinnedTimeSeries` The downsampled time series. """ if not isinstance(time_series, TimeSeries): raise TypeError("time_series should be a TimeSeries") if time_bin_size is not None and not isinstance( time_bin_size, (u.Quantity, TimeDelta) ): raise TypeError("'time_bin_size' should be a Quantity or a TimeDelta") if time_bin_start is not None and not isinstance(time_bin_start, (Time, TimeDelta)): time_bin_start = Time(time_bin_start) if time_bin_end is not None and not isinstance(time_bin_end, (Time, TimeDelta)): time_bin_end = Time(time_bin_end) # Use the table sorted by time ts_sorted = time_series.iloc[:] # If start time is not provided, it is assumed to be the start of the timeseries if time_bin_start is None: time_bin_start = ts_sorted.time[0] # Total duration of the timeseries is needed for determining either # `time_bin_size` or `nbins` in the case of scalar `time_bin_start` if time_bin_start.isscalar: time_duration = (ts_sorted.time[-1] - time_bin_start).sec if time_bin_size is None and time_bin_end is None: if time_bin_start.isscalar: if n_bins is None: raise TypeError( "With single 'time_bin_start' either 'n_bins', " "'time_bin_size' or time_bin_end' must be provided" ) else: # `nbins` defaults to the number needed to fit all points time_bin_size = time_duration / n_bins * u.s else: time_bin_end = np.maximum(ts_sorted.time[-1], time_bin_start[-1]) if time_bin_start.isscalar: if time_bin_size is not None: if time_bin_size.isscalar: # Determine the number of bins if n_bins is None: bin_size_sec = time_bin_size.to_value(u.s) n_bins = int(np.ceil(time_duration / bin_size_sec)) elif time_bin_end is not None: if not time_bin_end.isscalar: # Convert start time to an array and populate using `time_bin_end` scalar_start_time = time_bin_start time_bin_start = time_bin_end.replicate(copy=True) time_bin_start[0] = scalar_start_time time_bin_start[1:] = time_bin_end[:-1] # Check for overlapping bins, and warn if they are present if time_bin_end is not None: if ( not time_bin_end.isscalar and not time_bin_start.isscalar and np.any(time_bin_start[1:] < time_bin_end[:-1]) ): warnings.warn( "Overlapping bins should be avoided since they " "can lead to double-counting of data during binning.", AstropyUserWarning, ) binned = BinnedTimeSeries( time_bin_size=time_bin_size, time_bin_start=time_bin_start, time_bin_end=time_bin_end, n_bins=n_bins, ) if aggregate_func is None: aggregate_func = np.nanmean # Start and end times of the binned timeseries bin_start = binned.time_bin_start bin_end = binned.time_bin_end # Set `n_bins` to match the length of `time_bin_start` if # `n_bins` is unspecified or if `time_bin_start` is an iterable if n_bins is None or not time_bin_start.isscalar: n_bins = len(bin_start) # Find the subset of the table that is inside the union of all bins # - output: `keep` a mask to create the subset # - use relative time in seconds `np.longdouble`` in in creating `keep` to speed up # (`Time` object comparison is rather slow) # - tiny sacrifice on precision (< 0.01ns on 64 bit platform) rel_base = ts_sorted.time[0] rel_bin_start = _to_relative_longdouble(bin_start, rel_base) rel_bin_end = 
_to_relative_longdouble(bin_end, rel_base)
    rel_ts_sorted_time = _to_relative_longdouble(ts_sorted.time, rel_base)
    keep = (rel_ts_sorted_time >= rel_bin_start[0]) & (
        rel_ts_sorted_time <= rel_bin_end[-1]
    )

    # Find out indices to be removed because of noncontiguous bins
    #
    # Only need to check when adjacent bins have gaps, i.e.,
    # bin_start[ind + 1] > bin_end[ind]
    # - see: https://github.com/astropy/astropy/issues/13058#issuecomment-1090846697
    #   on thoughts on how to reduce the number of times to loop
    noncontiguous_bins_indices = np.where(rel_bin_start[1:] > rel_bin_end[:-1])[0]
    for ind in noncontiguous_bins_indices:
        delete_indices = np.where(
            np.logical_and(
                rel_ts_sorted_time > rel_bin_end[ind],
                rel_ts_sorted_time < rel_bin_start[ind + 1],
            )
        )
        keep[delete_indices] = False

    rel_subset_time = rel_ts_sorted_time[keep]

    # Figure out which bin each row falls in by sorting with respect
    # to the bin end times
    indices = np.searchsorted(rel_bin_end, rel_subset_time)

    # For time == bin_start[i+1] == bin_end[i], let bin_start take precedence
    if len(indices) and np.all(rel_bin_start[1:] >= rel_bin_end[:-1]):
        indices_start = np.searchsorted(
            rel_subset_time, rel_bin_start[rel_bin_start <= rel_ts_sorted_time[-1]]
        )
        indices[indices_start] = np.arange(len(indices_start))

    # Determine rows where values are defined
    if len(indices):
        groups = np.hstack([0, np.nonzero(np.diff(indices))[0] + 1])
    else:
        groups = np.array([])

    # Find unique indices to determine which rows in the final time series
    # will not be empty.
    unique_indices = np.unique(indices)

    # Add back columns
    subset = ts_sorted[keep]
    for colname in subset.colnames:
        if colname == "time":
            continue

        values = subset[colname]

        # FIXME: figure out how to avoid the following, if possible
        if not isinstance(values, (np.ndarray, u.Quantity)):
            warnings.warn(
                f"Skipping column {colname} since it has a mix-in type",
                AstropyUserWarning,
            )
            continue

        if isinstance(values, u.Quantity):
            data = u.Quantity(np.repeat(np.nan, n_bins), unit=values.unit)
            data[unique_indices] = u.Quantity(
                reduceat(values.value, groups, aggregate_func), values.unit, copy=False
            )
        else:
            data = np.ma.zeros(n_bins, dtype=values.dtype)
            data.mask = 1
            data[unique_indices] = reduceat(values, groups, aggregate_func)
            data.mask[unique_indices] = 0

        binned[colname] = data

    return binned
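

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the public API; the
# flux values are arbitrary): bin an evenly-sampled TimeSeries into 3-second
# bins, aggregating with the default np.nanmean.
if __name__ == "__main__":
    ts = TimeSeries(
        time_start="2016-03-22T12:30:31",
        time_delta=1 * u.s,
        data={"flux": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0] * u.mJy},
    )

    # Scalar time_bin_size with a scalar start: n_bins is derived from the
    # total duration, and each bin holds the nanmean of its samples.
    binned = aggregate_downsample(ts, time_bin_size=3 * u.s)
    print(binned)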
3e65aa0c695bef7c5df2b2cc25b7cf3ae22e9fcf28fdd78ca43fffe1dd3aeb49
# Licensed under a 3-clause BSD style license - see LICENSE.rst

from contextlib import contextmanager
from functools import wraps
from types import FunctionType

from astropy.table import QTable

__all__ = ["BaseTimeSeries", "autocheck_required_columns"]

COLUMN_RELATED_METHODS = [
    "add_column",
    "add_columns",
    "keep_columns",
    "remove_column",
    "remove_columns",
    "rename_column",
]


def autocheck_required_columns(cls):
    """
    This is a decorator that ensures that the table contains the specific
    columns indicated by the _required_columns attribute. The aim is to
    decorate all methods that might affect the columns in the table and check
    for consistency after the methods have been run.
    """

    def decorator_method(method):
        @wraps(method)
        def wrapper(self, *args, **kwargs):
            result = method(self, *args, **kwargs)
            self._check_required_columns()
            return result

        return wrapper

    for name in COLUMN_RELATED_METHODS:
        if not hasattr(cls, name) or not isinstance(getattr(cls, name), FunctionType):
            raise ValueError(f"{name} is not a valid method")
        setattr(cls, name, decorator_method(getattr(cls, name)))

    return cls


class BaseTimeSeries(QTable):
    _required_columns = None
    _required_columns_enabled = True

    # If _required_columns_relax is True, we don't require the columns to be
    # present but we do require them to be the correct ones IF present. Note
    # that this is a temporary state - as soon as the required columns
    # are all present, we toggle this to False
    _required_columns_relax = False

    def _check_required_columns(self):
        def as_scalar_or_list_str(obj):
            if not hasattr(obj, "__len__"):
                return f"'{obj}'"
            elif len(obj) == 1:
                return f"'{obj[0]}'"
            else:
                return str(obj)

        if not self._required_columns_enabled:
            return

        if self._required_columns is not None:
            if self._required_columns_relax:
                required_columns = self._required_columns[: len(self.colnames)]
            else:
                required_columns = self._required_columns

            plural = "s" if len(required_columns) > 1 else ""

            if not self._required_columns_relax and len(self.colnames) == 0:
                raise ValueError(
                    f"{self.__class__.__name__} object is invalid - expected"
                    f" '{required_columns[0]}' as the first column{plural} but time"
                    " series has no columns"
                )

            elif self.colnames[: len(required_columns)] != required_columns:
                raise ValueError(
                    f"{self.__class__.__name__} object is invalid - expected"
                    f" {as_scalar_or_list_str(required_columns)} as the first"
                    f" column{plural} but found"
                    f" {as_scalar_or_list_str(self.colnames[: len(required_columns)])}"
                )

            if (
                self._required_columns_relax
                and self._required_columns == self.colnames[: len(self._required_columns)]
            ):
                self._required_columns_relax = False

    @contextmanager
    def _delay_required_column_checks(self):
        self._required_columns_enabled = False
        yield
        self._required_columns_enabled = True
        self._check_required_columns()
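

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; ``_DemoSeries`` is a hypothetical
# subclass, not part of this module): the decorator wraps the column-modifying
# QTable methods so that the leading columns are checked against
# ``_required_columns`` after every call.
if __name__ == "__main__":

    @autocheck_required_columns
    class _DemoSeries(BaseTimeSeries):
        _required_columns = ["time"]

    demo = _DemoSeries()
    demo.add_column([1.0, 2.0, 3.0], name="time")  # required first column
    demo.add_column([10.0, 20.0, 30.0], name="flux")

    try:
        demo.remove_column("time")  # violates the required-column invariant
    except ValueError as exc:
        print(f"Caught expected error: {exc}")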
686cea3b3b58908c27db754e3ce2b473c9fe72236d759c7b5663703fa561081f
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This subpackage contains classes and functions for work with time series. """ from astropy.timeseries import io from astropy.timeseries.binned import * from astropy.timeseries.core import * from astropy.timeseries.downsample import * from astropy.timeseries.periodograms import * from astropy.timeseries.sampled import *
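
# Illustrative note: the wildcard imports above re-export the public API of
# the subpackage, so typical usage is, e.g.::
#
#     from astropy.timeseries import TimeSeries, BinnedTimeSeries, aggregate_downsample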
adadc3ed511222a79b709206e30226092340d950ba6f3dea476b74d14e479a3a
# Licensed under a 3-clause BSD style license - see LICENSE.rst

from copy import deepcopy

import numpy as np

from astropy import units as u
from astropy.table import QTable, Table, groups
from astropy.time import Time, TimeDelta
from astropy.timeseries.core import BaseTimeSeries, autocheck_required_columns
from astropy.units import Quantity

__all__ = ["BinnedTimeSeries"]


@autocheck_required_columns
class BinnedTimeSeries(BaseTimeSeries):
    """
    A class to represent binned time series data in tabular form.

    `~astropy.timeseries.BinnedTimeSeries` provides a class for representing
    time series as a collection of values of different quantities measured in
    time bins (for time series with values sampled at specific times, see the
    `~astropy.timeseries.TimeSeries` class).
    `~astropy.timeseries.BinnedTimeSeries` is a sub-class of
    `~astropy.table.QTable` and thus provides all the standard table
    manipulation methods available to tables, but it also provides additional
    conveniences for dealing with time series, such as a flexible initializer
    for setting up the times, and attributes to access the start/center/end
    time of bins.

    See also: https://docs.astropy.org/en/stable/timeseries/

    Parameters
    ----------
    data : numpy ndarray, dict, list, table-like object, optional
        Data to initialize time series. This does not need to contain the
        times, which can be provided separately, but if it does contain the
        times they should be in columns called ``'time_bin_start'`` and
        ``'time_bin_size'`` to be automatically recognized.
    time_bin_start : `~astropy.time.Time` or iterable
        The times of the start of each bin - this can be either given
        directly as a `~astropy.time.Time` array or as any iterable that
        initializes the `~astropy.time.Time` class. If this is given, then
        the remaining time-related arguments should not be used. This can also
        be a scalar value if ``time_bin_size`` is provided.
    time_bin_end : `~astropy.time.Time` or iterable
        The times of the end of each bin - this can be either given directly
        as a `~astropy.time.Time` array or as any value or iterable that
        initializes the `~astropy.time.Time` class. If this is given, then the
        remaining time-related arguments should not be used. This can only be
        given if ``time_bin_start`` is an array of values. If ``time_bin_end``
        is a scalar, time bins are assumed to be contiguous, such that the end
        of each bin is the start of the next one, and ``time_bin_end`` gives
        the end time for the last bin. If ``time_bin_end`` is an array, the
        time bins do not need to be contiguous. If this argument is provided,
        ``time_bin_size`` should not be provided.
    time_bin_size : `~astropy.time.TimeDelta` or `~astropy.units.Quantity`
        The size of the time bins, either as a scalar value (in which case all
        time bins will be assumed to have the same duration) or as an array of
        values (in which case each time bin can have a different duration).
        If this argument is provided, ``time_bin_end`` should not be provided.
    n_bins : int
        The number of time bins for the series. This is only used if both
        ``time_bin_start`` and ``time_bin_size`` are provided and are scalar
        values.
    **kwargs : dict, optional
        Additional keyword arguments are passed to `~astropy.table.QTable`.
    """

    _required_columns = ["time_bin_start", "time_bin_size"]

    def __init__(
        self,
        data=None,
        *,
        time_bin_start=None,
        time_bin_end=None,
        time_bin_size=None,
        n_bins=None,
        **kwargs,
    ):
        super().__init__(data=data, **kwargs)

        # For some operations, an empty time series needs to be created, then
        # columns added one by one.
We should check that when columns are added # manually, time is added first and is of the right type. if ( data is None and time_bin_start is None and time_bin_end is None and time_bin_size is None and n_bins is None ): self._required_columns_relax = True return # First if time_bin_start and time_bin_end have been given in the table data, we # should extract them and treat them as if they had been passed as # keyword arguments. if "time_bin_start" in self.colnames: if time_bin_start is None: time_bin_start = self.columns["time_bin_start"] else: raise TypeError( "'time_bin_start' has been given both in the table " "and as a keyword argument" ) if "time_bin_size" in self.colnames: if time_bin_size is None: time_bin_size = self.columns["time_bin_size"] else: raise TypeError( "'time_bin_size' has been given both in the table " "and as a keyword argument" ) if time_bin_start is None: raise TypeError("'time_bin_start' has not been specified") if time_bin_end is None and time_bin_size is None: raise TypeError( "Either 'time_bin_size' or 'time_bin_end' should be specified" ) if not isinstance(time_bin_start, (Time, TimeDelta)): time_bin_start = Time(time_bin_start) if time_bin_end is not None and not isinstance(time_bin_end, (Time, TimeDelta)): time_bin_end = Time(time_bin_end) if time_bin_size is not None and not isinstance( time_bin_size, (Quantity, TimeDelta) ): raise TypeError("'time_bin_size' should be a Quantity or a TimeDelta") if isinstance(time_bin_size, TimeDelta): time_bin_size = time_bin_size.sec * u.s if n_bins is not None and time_bin_size is not None: if not (time_bin_start.isscalar and time_bin_size.isscalar): raise TypeError( "'n_bins' cannot be specified if 'time_bin_start' or " "'time_bin_size' are not scalar'" ) if time_bin_start.isscalar: # We interpret this as meaning that this is the start of the # first bin and that the bins are contiguous. In this case, # we require time_bin_size to be specified. if time_bin_size is None: raise TypeError( "'time_bin_start' is scalar, so 'time_bin_size' is required" ) if time_bin_size.isscalar: if data is not None: if n_bins is not None: if n_bins != len(self): raise TypeError( "'n_bins' has been given and it is not the " "same length as the input data." ) else: n_bins = len(self) time_bin_size = np.repeat(time_bin_size, n_bins) time_delta = np.cumsum(time_bin_size) time_bin_end = time_bin_start + time_delta # Now shift the array so that the first entry is 0 time_delta = np.roll(time_delta, 1) time_delta[0] = 0.0 * u.s # Make time_bin_start into an array time_bin_start = time_bin_start + time_delta else: if len(self.colnames) > 0 and len(time_bin_start) != len(self): raise ValueError( f"Length of 'time_bin_start' ({len(time_bin_start)}) should match " f"table length ({len(self)})" ) if time_bin_end is not None: if time_bin_end.isscalar: times = time_bin_start.copy() times[:-1] = times[1:] times[-1] = time_bin_end time_bin_end = times time_bin_size = (time_bin_end - time_bin_start).sec * u.s if time_bin_size.isscalar: time_bin_size = np.repeat(time_bin_size, len(self)) with self._delay_required_column_checks(): if "time_bin_start" in self.colnames: self.remove_column("time_bin_start") if "time_bin_size" in self.colnames: self.remove_column("time_bin_size") self.add_column(time_bin_start, index=0, name="time_bin_start") self.add_index("time_bin_start") self.add_column(time_bin_size, index=1, name="time_bin_size") @property def time_bin_start(self): """ The start times of all the time bins. 
""" return self["time_bin_start"] @property def time_bin_center(self): """ The center times of all the time bins. """ return self["time_bin_start"] + self["time_bin_size"] * 0.5 @property def time_bin_end(self): """ The end times of all the time bins. """ return self["time_bin_start"] + self["time_bin_size"] @property def time_bin_size(self): """ The sizes of all the time bins. """ return self["time_bin_size"] def __getitem__(self, item): if self._is_list_or_tuple_of_str(item): if "time_bin_start" not in item or "time_bin_size" not in item: out = QTable( [self[x] for x in item], meta=deepcopy(self.meta), copy_indices=self._copy_indices, ) out._groups = groups.TableGroups( out, indices=self.groups._indices, keys=self.groups._keys ) return out return super().__getitem__(item) @classmethod def read( self, filename, time_bin_start_column=None, time_bin_end_column=None, time_bin_size_column=None, time_bin_size_unit=None, time_format=None, time_scale=None, format=None, *args, **kwargs, ): """ Read and parse a file and returns a `astropy.timeseries.BinnedTimeSeries`. This method uses the unified I/O infrastructure in Astropy which makes it easy to define readers/writers for various classes (https://docs.astropy.org/en/stable/io/unified.html). By default, this method will try and use readers defined specifically for the `astropy.timeseries.BinnedTimeSeries` class - however, it is also possible to use the ``format`` keyword to specify formats defined for the `astropy.table.Table` class - in this case, you will need to also provide the column names for column containing the start times for the bins, as well as other column names (see the Parameters section below for details):: >>> from astropy.timeseries.binned import BinnedTimeSeries >>> ts = BinnedTimeSeries.read('binned.dat', format='ascii.ecsv', ... time_bin_start_column='date_start', ... time_bin_end_column='date_end') # doctest: +SKIP Parameters ---------- filename : str File to parse. format : str File format specifier. time_bin_start_column : str The name of the column with the start time for each bin. time_bin_end_column : str, optional The name of the column with the end time for each bin. Either this option or ``time_bin_size_column`` should be specified. time_bin_size_column : str, optional The name of the column with the size for each bin. Either this option or ``time_bin_end_column`` should be specified. time_bin_size_unit : `astropy.units.Unit`, optional If ``time_bin_size_column`` is specified but does not have a unit set in the table, you can specify the unit manually. time_format : str, optional The time format for the start and end columns. time_scale : str, optional The time scale for the start and end columns. *args : tuple, optional Positional arguments passed through to the data reader. **kwargs : dict, optional Keyword arguments passed through to the data reader. Returns ------- out : `astropy.timeseries.binned.BinnedTimeSeries` BinnedTimeSeries corresponding to the file. """ try: # First we try the readers defined for the BinnedTimeSeries class return super().read(filename, format=format, *args, **kwargs) except TypeError: # Otherwise we fall back to the default Table readers if time_bin_start_column is None: raise ValueError( "``time_bin_start_column`` should be provided since the default" " Table readers are being used." ) if time_bin_end_column is None and time_bin_size_column is None: raise ValueError( "Either `time_bin_end_column` or `time_bin_size_column` should be" " provided." 
) elif time_bin_end_column is not None and time_bin_size_column is not None: raise ValueError( "Cannot specify both `time_bin_end_column` and" " `time_bin_size_column`." ) table = Table.read(filename, format=format, *args, **kwargs) if time_bin_start_column in table.colnames: time_bin_start = Time( table.columns[time_bin_start_column], scale=time_scale, format=time_format, ) table.remove_column(time_bin_start_column) else: raise ValueError( f"Bin start time column '{time_bin_start_column}' not found in the" " input data." ) if time_bin_end_column is not None: if time_bin_end_column in table.colnames: time_bin_end = Time( table.columns[time_bin_end_column], scale=time_scale, format=time_format, ) table.remove_column(time_bin_end_column) else: raise ValueError( f"Bin end time column '{time_bin_end_column}' not found in the" " input data." ) time_bin_size = None elif time_bin_size_column is not None: if time_bin_size_column in table.colnames: time_bin_size = table.columns[time_bin_size_column] table.remove_column(time_bin_size_column) else: raise ValueError( f"Bin size column '{time_bin_size_column}' not found in the" " input data." ) if time_bin_size.unit is None: if time_bin_size_unit is None or not isinstance( time_bin_size_unit, u.UnitBase ): raise ValueError( "The bin size unit should be specified as an astropy Unit" " using ``time_bin_size_unit``." ) time_bin_size = time_bin_size * time_bin_size_unit else: time_bin_size = u.Quantity(time_bin_size) time_bin_end = None if time_bin_start.isscalar and time_bin_size.isscalar: return BinnedTimeSeries( data=table, time_bin_start=time_bin_start, time_bin_end=time_bin_end, time_bin_size=time_bin_size, n_bins=len(table), ) else: return BinnedTimeSeries( data=table, time_bin_start=time_bin_start, time_bin_end=time_bin_end, time_bin_size=time_bin_size, )
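# Usage sketch for the bin-expansion logic above: a scalar ``time_bin_start``
# plus a scalar ``time_bin_size`` is broadcast into contiguous bins matching
# the data length, and ``time_bin_end``/``time_bin_center`` come from the
# derived properties. The column name ``flux`` is an arbitrary example.
import astropy.units as u
from astropy.timeseries import BinnedTimeSeries

ts = BinnedTimeSeries(
    time_bin_start="2016-03-22T12:30:31",
    time_bin_size=3 * u.s,
    data={"flux": [1.0, 4.0, 5.0] * u.mJy},
)
print(ts.time_bin_center.isot)  # three bin centers, spaced 3 s apart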
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Astronomical and physics constants in SI units. See :mod:`astropy.constants` for a complete listing of constants defined in Astropy. """ import numpy as np from .constant import Constant # ASTRONOMICAL CONSTANTS class IAU2012(Constant): default_reference = "IAU 2012" _registry = {} _has_incompatible_units = set() # DISTANCE # Astronomical Unit au = IAU2012( "au", "Astronomical Unit", 1.49597870700e11, "m", 0.0, "IAU 2012 Resolution B2", system="si", ) # Parsec pc = IAU2012( "pc", "Parsec", au.value / np.tan(np.radians(1.0 / 3600.0)), "m", au.uncertainty / np.tan(np.radians(1.0 / 3600.0)), "Derived from au", system="si", ) # Kiloparsec kpc = IAU2012( "kpc", "Kiloparsec", 1000.0 * au.value / np.tan(np.radians(1.0 / 3600.0)), "m", 1000.0 * au.uncertainty / np.tan(np.radians(1.0 / 3600.0)), "Derived from au", system="si", ) # Luminosity not defined till 2015 (https://arxiv.org/abs/1510.06262) L_bol0 = IAU2012( "L_bol0", "Luminosity for absolute bolometric magnitude 0", 3.0128e28, "W", 0.0, "IAU 2015 Resolution B 2", system="si", ) # SOLAR QUANTITIES # Solar luminosity L_sun = IAU2012( "L_sun", "Solar luminosity", 3.846e26, "W", 0.0005e26, "Allen's Astrophysical Quantities 4th Ed.", system="si", ) # Solar mass M_sun = IAU2012( "M_sun", "Solar mass", 1.9891e30, "kg", 0.00005e30, "Allen's Astrophysical Quantities 4th Ed.", system="si", ) # Solar radius R_sun = IAU2012( "R_sun", "Solar radius", 6.95508e8, "m", 0.00026e8, "Allen's Astrophysical Quantities 4th Ed.", system="si", ) # OTHER SOLAR SYSTEM QUANTITIES # Jupiter mass M_jup = IAU2012( "M_jup", "Jupiter mass", 1.8987e27, "kg", 0.00005e27, "Allen's Astrophysical Quantities 4th Ed.", system="si", ) # Jupiter equatorial radius R_jup = IAU2012( "R_jup", "Jupiter equatorial radius", 7.1492e7, "m", 0.00005e7, "Allen's Astrophysical Quantities 4th Ed.", system="si", ) # Earth mass M_earth = IAU2012( "M_earth", "Earth mass", 5.9742e24, "kg", 0.00005e24, "Allen's Astrophysical Quantities 4th Ed.", system="si", ) # Earth equatorial radius R_earth = IAU2012( "R_earth", "Earth equatorial radius", 6.378136e6, "m", 0.0000005e6, "Allen's Astrophysical Quantities 4th Ed.", system="si", )
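# Usage sketch: each object above is a Constant (a Quantity subclass), so it
# carries a unit, an uncertainty and a reference, and converts like any other
# Quantity. This particular module pins the IAU 2012 / Allen's values:
from astropy.constants import iau2012

print(iau2012.au)            # value, uncertainty and reference
print(iau2012.pc.to("lyr"))  # unit conversion, as with any Quantity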
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Astronomical and physics constants in SI units. See :mod:`astropy.constants` for a complete listing of constants defined in Astropy. """ import numpy as np from .constant import Constant, EMConstant # PHYSICAL CONSTANTS class CODATA2014(Constant): default_reference = "CODATA 2014" _registry = {} _has_incompatible_units = set() class EMCODATA2014(CODATA2014, EMConstant): _registry = CODATA2014._registry h = CODATA2014( "h", "Planck constant", 6.626070040e-34, "J s", 0.000000081e-34, system="si" ) hbar = CODATA2014( "hbar", "Reduced Planck constant", 1.054571800e-34, "J s", 0.000000013e-34, system="si", ) k_B = CODATA2014( "k_B", "Boltzmann constant", 1.38064852e-23, "J / (K)", 0.00000079e-23, system="si" ) c = CODATA2014( "c", "Speed of light in vacuum", 299792458.0, "m / (s)", 0.0, system="si" ) G = CODATA2014( "G", "Gravitational constant", 6.67408e-11, "m3 / (kg s2)", 0.00031e-11, system="si" ) g0 = CODATA2014( "g0", "Standard acceleration of gravity", 9.80665, "m / s2", 0.0, system="si" ) m_p = CODATA2014( "m_p", "Proton mass", 1.672621898e-27, "kg", 0.000000021e-27, system="si" ) m_n = CODATA2014( "m_n", "Neutron mass", 1.674927471e-27, "kg", 0.000000021e-27, system="si" ) m_e = CODATA2014( "m_e", "Electron mass", 9.10938356e-31, "kg", 0.00000011e-31, system="si" ) u = CODATA2014("u", "Atomic mass", 1.660539040e-27, "kg", 0.000000020e-27, system="si") sigma_sb = CODATA2014( "sigma_sb", "Stefan-Boltzmann constant", 5.670367e-8, "W / (K4 m2)", 0.000013e-8, system="si", ) e = EMCODATA2014( "e", "Electron charge", 1.6021766208e-19, "C", 0.0000000098e-19, system="si" ) eps0 = EMCODATA2014( "eps0", "Electric constant", 8.854187817e-12, "F/m", 0.0, system="si" ) N_A = CODATA2014( "N_A", "Avogadro's number", 6.022140857e23, "1 / (mol)", 0.000000074e23, system="si" ) R = CODATA2014("R", "Gas constant", 8.3144598, "J / (K mol)", 0.0000048, system="si") Ryd = CODATA2014( "Ryd", "Rydberg constant", 10973731.568508, "1 / (m)", 0.000065, system="si" ) a0 = CODATA2014( "a0", "Bohr radius", 0.52917721067e-10, "m", 0.00000000012e-10, system="si" ) muB = CODATA2014( "muB", "Bohr magneton", 927.4009994e-26, "J/T", 0.00002e-26, system="si" ) alpha = CODATA2014( "alpha", "Fine-structure constant", 7.2973525664e-3, "", 0.0000000017e-3, system="si", ) atm = CODATA2014("atm", "Standard atmosphere", 101325, "Pa", 0.0, system="si") mu0 = CODATA2014("mu0", "Magnetic constant", 4.0e-7 * np.pi, "N/A2", 0.0, system="si") sigma_T = CODATA2014( "sigma_T", "Thomson scattering cross-section", 0.66524587158e-28, "m2", 0.00000000091e-28, system="si", ) b_wien = CODATA2014( "b_wien", "Wien wavelength displacement law constant", 2.8977729e-3, "m K", 0.0000017e-3, system="si", ) # cgs constants # Only constants that cannot be converted directly from S.I. are defined here. e_esu = EMCODATA2014( e.abbrev, e.name, e.value * c.value * 10.0, "statC", e.uncertainty * c.value * 10.0, system="esu", ) e_emu = EMCODATA2014( e.abbrev, e.name, e.value / 10, "abC", e.uncertainty / 10, system="emu" ) e_gauss = EMCODATA2014( e.abbrev, e.name, e.value * c.value * 10.0, "Fr", e.uncertainty * c.value * 10.0, system="gauss", )
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Contains astronomical and physical constants for use in Astropy or other places. A typical use case might be:: >>> from astropy.constants import c, m_e >>> # ... define the mass of something you want the rest energy of as m ... >>> m = m_e >>> E = m * c**2 >>> E.to('MeV') # doctest: +FLOAT_CMP <Quantity 0.510998927603161 MeV> """ import warnings from astropy.utils import find_current_module # Hack to make circular imports with units work # isort: split from astropy import units del units from . import cgs, si from . import utils as _utils from .config import codata, iaudata from .constant import Constant, EMConstant # for updating the constants module docstring _lines = [ "The following constants are available:\n", "========== ============== ================ =========================", " Name Value Unit Description", "========== ============== ================ =========================", ] # Catch warnings about "already has a definition in the None system" with warnings.catch_warnings(): warnings.filterwarnings("ignore", "Constant .*already has a definition") _utils._set_c( codata, iaudata, find_current_module(), not_in_module_only=True, doclines=_lines, set_class=True, ) _lines.append(_lines[1]) if __doc__ is not None: __doc__ += "\n".join(_lines) # Clean up namespace del find_current_module del warnings del _utils del _lines
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Astronomical and physics constants in SI units. See :mod:`astropy.constants` for a complete listing of constants defined in Astropy. """ import numpy as np from .constant import Constant, EMConstant # PHYSICAL CONSTANTS class CODATA2010(Constant): default_reference = "CODATA 2010" _registry = {} _has_incompatible_units = set() def __new__( cls, abbrev, name, value, unit, uncertainty, reference=default_reference, system=None, ): return super().__new__( cls, abbrev, name, value, unit, uncertainty, reference, system ) class EMCODATA2010(CODATA2010, EMConstant): _registry = CODATA2010._registry h = CODATA2010( "h", "Planck constant", 6.62606957e-34, "J s", 0.00000029e-34, system="si" ) hbar = CODATA2010( "hbar", "Reduced Planck constant", h.value * 0.5 / np.pi, "J s", h.uncertainty * 0.5 / np.pi, h.reference, system="si", ) k_B = CODATA2010( "k_B", "Boltzmann constant", 1.3806488e-23, "J / (K)", 0.0000013e-23, system="si" ) c = CODATA2010( "c", "Speed of light in vacuum", 2.99792458e8, "m / (s)", 0.0, system="si" ) G = CODATA2010( "G", "Gravitational constant", 6.67384e-11, "m3 / (kg s2)", 0.00080e-11, system="si" ) g0 = CODATA2010( "g0", "Standard acceleration of gravity", 9.80665, "m / s2", 0.0, system="si" ) m_p = CODATA2010( "m_p", "Proton mass", 1.672621777e-27, "kg", 0.000000074e-27, system="si" ) m_n = CODATA2010( "m_n", "Neutron mass", 1.674927351e-27, "kg", 0.000000074e-27, system="si" ) m_e = CODATA2010( "m_e", "Electron mass", 9.10938291e-31, "kg", 0.00000040e-31, system="si" ) u = CODATA2010("u", "Atomic mass", 1.660538921e-27, "kg", 0.000000073e-27, system="si") sigma_sb = CODATA2010( "sigma_sb", "Stefan-Boltzmann constant", 5.670373e-8, "W / (K4 m2)", 0.000021e-8, system="si", ) e = EMCODATA2010( "e", "Electron charge", 1.602176565e-19, "C", 0.000000035e-19, system="si" ) eps0 = EMCODATA2010( "eps0", "Electric constant", 8.854187817e-12, "F/m", 0.0, system="si" ) N_A = CODATA2010( "N_A", "Avogadro's number", 6.02214129e23, "1 / (mol)", 0.00000027e23, system="si" ) R = CODATA2010("R", "Gas constant", 8.3144621, "J / (K mol)", 0.0000075, system="si") Ryd = CODATA2010( "Ryd", "Rydberg constant", 10973731.568539, "1 / (m)", 0.000055, system="si" ) a0 = CODATA2010( "a0", "Bohr radius", 0.52917721092e-10, "m", 0.00000000017e-10, system="si" ) muB = CODATA2010( "muB", "Bohr magneton", 927.400968e-26, "J/T", 0.00002e-26, system="si" ) alpha = CODATA2010( "alpha", "Fine-structure constant", 7.2973525698e-3, "", 0.0000000024e-3, system="si", ) atm = CODATA2010("atm", "Standard atmosphere", 101325, "Pa", 0.0, system="si") mu0 = CODATA2010("mu0", "Magnetic constant", 4.0e-7 * np.pi, "N/A2", 0.0, system="si") sigma_T = CODATA2010( "sigma_T", "Thomson scattering cross-section", 0.6652458734e-28, "m2", 0.0000000013e-28, system="si", ) b_wien = Constant( "b_wien", "Wien wavelength displacement law constant", 2.8977721e-3, "m K", 0.0000026e-3, "CODATA 2010", system="si", ) # cgs constants # Only constants that cannot be converted directly from S.I. are defined here. e_esu = EMCODATA2010( e.abbrev, e.name, e.value * c.value * 10.0, "statC", e.uncertainty * c.value * 10.0, system="esu", ) e_emu = EMCODATA2010( e.abbrev, e.name, e.value / 10, "abC", e.uncertainty / 10, system="emu" ) e_gauss = EMCODATA2010( e.abbrev, e.name, e.value * c.value * 10.0, "Fr", e.uncertainty * c.value * 10.0, system="gauss", )
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Astronomical and physics constants in SI units. See :mod:`astropy.constants` for a complete listing of constants defined in Astropy. """ import numpy as np from .config import codata from .constant import Constant # ASTRONOMICAL CONSTANTS class IAU2015(Constant): default_reference = "IAU 2015" _registry = {} _has_incompatible_units = set() # DISTANCE # Astronomical Unit (did not change from 2012) au = IAU2015( "au", "Astronomical Unit", 1.49597870700e11, "m", 0.0, "IAU 2012 Resolution B2", system="si", ) # Parsec pc = IAU2015( "pc", "Parsec", au.value / np.radians(1.0 / 3600.0), "m", au.uncertainty / np.radians(1.0 / 3600.0), "Derived from au + IAU 2015 Resolution B 2 note [4]", system="si", ) # Kiloparsec kpc = IAU2015( "kpc", "Kiloparsec", 1000.0 * au.value / np.radians(1.0 / 3600.0), "m", 1000.0 * au.uncertainty / np.radians(1.0 / 3600.0), "Derived from au + IAU 2015 Resolution B 2 note [4]", system="si", ) # Luminosity L_bol0 = IAU2015( "L_bol0", "Luminosity for absolute bolometric magnitude 0", 3.0128e28, "W", 0.0, "IAU 2015 Resolution B 2", system="si", ) # SOLAR QUANTITIES # Solar luminosity L_sun = IAU2015( "L_sun", "Nominal solar luminosity", 3.828e26, "W", 0.0, "IAU 2015 Resolution B 3", system="si", ) # Solar mass parameter GM_sun = IAU2015( "GM_sun", "Nominal solar mass parameter", 1.3271244e20, "m3 / (s2)", 0.0, "IAU 2015 Resolution B 3", system="si", ) # Solar mass (derived from mass parameter and gravitational constant) M_sun = IAU2015( "M_sun", "Solar mass", GM_sun.value / codata.G.value, "kg", ((codata.G.uncertainty / codata.G.value) * (GM_sun.value / codata.G.value)), f"IAU 2015 Resolution B 3 + {codata.G.reference}", system="si", ) # Solar radius R_sun = IAU2015( "R_sun", "Nominal solar radius", 6.957e8, "m", 0.0, "IAU 2015 Resolution B 3", system="si", ) # OTHER SOLAR SYSTEM QUANTITIES # Jupiter mass parameter GM_jup = IAU2015( "GM_jup", "Nominal Jupiter mass parameter", 1.2668653e17, "m3 / (s2)", 0.0, "IAU 2015 Resolution B 3", system="si", ) # Jupiter mass (derived from mass parameter and gravitational constant) M_jup = IAU2015( "M_jup", "Jupiter mass", GM_jup.value / codata.G.value, "kg", ((codata.G.uncertainty / codata.G.value) * (GM_jup.value / codata.G.value)), f"IAU 2015 Resolution B 3 + {codata.G.reference}", system="si", ) # Jupiter equatorial radius R_jup = IAU2015( "R_jup", "Nominal Jupiter equatorial radius", 7.1492e7, "m", 0.0, "IAU 2015 Resolution B 3", system="si", ) # Earth mass parameter GM_earth = IAU2015( "GM_earth", "Nominal Earth mass parameter", 3.986004e14, "m3 / (s2)", 0.0, "IAU 2015 Resolution B 3", system="si", ) # Earth mass (derived from mass parameter and gravitational constant) M_earth = IAU2015( "M_earth", "Earth mass", GM_earth.value / codata.G.value, "kg", ((codata.G.uncertainty / codata.G.value) * (GM_earth.value / codata.G.value)), f"IAU 2015 Resolution B 3 + {codata.G.reference}", system="si", ) # Earth equatorial radius R_earth = IAU2015( "R_earth", "Nominal Earth equatorial radius", 6.3781e6, "m", 0.0, "IAU 2015 Resolution B 3", system="si", )
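# Usage sketch: the derived masses above carry only G's relative uncertainty,
# since the IAU 2015 nominal mass parameters (GM_*) are exact by resolution.
# The numeric value therefore depends on which CODATA version is configured:
from astropy.constants import iau2015

print(iau2015.GM_sun.uncertainty)                        # 0.0 -- exact
print(iau2015.M_sun.uncertainty / iau2015.M_sun.value)   # relative sigma of G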
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Configures the codata and iaudata used, possibly using user configuration. """ # Note: doing this in __init__ causes import problems with units, # as si.py and cgs.py have to import the result. import importlib import astropy phys_version = astropy.physical_constants.get() astro_version = astropy.astronomical_constants.get() codata = importlib.import_module(".constants." + phys_version, "astropy") iaudata = importlib.import_module(".constants." + astro_version, "astropy")
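# Usage sketch: the two ScienceState objects read above select which modules
# become ``codata`` and ``iaudata``. They can only be changed before
# ``astropy.units`` is imported, so in practice a ``set()`` call has to be
# the very first thing in a script:
import astropy

print(astropy.physical_constants.get())      # e.g. 'codata2018'
print(astropy.astronomical_constants.get())  # e.g. 'iau2015'
# astropy.physical_constants.set("codata2014")  # only valid before
#                                               # astropy.units is imported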
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Astronomical and physics constants for Astropy v2.0. See :mod:`astropy.constants` for a complete listing of constants defined in Astropy. """ import warnings from astropy.utils import find_current_module from . import codata2014, iau2015 from . import utils as _utils codata = codata2014 iaudata = iau2015 _utils._set_c(codata, iaudata, find_current_module()) # Overwrite the following for consistency. # https://github.com/astropy/astropy/issues/8920 with warnings.catch_warnings(): warnings.filterwarnings("ignore", "Constant .*already has a definition") # Solar mass (derived from mass parameter and gravitational constant) M_sun = iau2015.IAU2015( "M_sun", "Solar mass", iau2015.GM_sun.value / codata2014.G.value, "kg", ( (codata2014.G.uncertainty / codata2014.G.value) * (iau2015.GM_sun.value / codata2014.G.value) ), f"IAU 2015 Resolution B 3 + {codata2014.G.reference}", system="si", ) # Jupiter mass (derived from mass parameter and gravitational constant) M_jup = iau2015.IAU2015( "M_jup", "Jupiter mass", iau2015.GM_jup.value / codata2014.G.value, "kg", ( (codata2014.G.uncertainty / codata2014.G.value) * (iau2015.GM_jup.value / codata2014.G.value) ), f"IAU 2015 Resolution B 3 + {codata2014.G.reference}", system="si", ) # Earth mass (derived from mass parameter and gravitational constant) M_earth = iau2015.IAU2015( "M_earth", "Earth mass", iau2015.GM_earth.value / codata2014.G.value, "kg", ( (codata2014.G.uncertainty / codata2014.G.value) * (iau2015.GM_earth.value / codata2014.G.value) ), f"IAU 2015 Resolution B 3 + {codata2014.G.reference}", system="si", ) # Clean up namespace del warnings del find_current_module del _utils
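# Usage sketch: version-pinned modules like this one let code keep the
# constant values shipped with a given Astropy release, independent of the
# configured default:
from astropy.constants import astropyconst20 as const

print(const.G)      # CODATA 2014 value, whatever the current default is
print(const.M_sun)  # IAU 2015 GM_sun divided by the CODATA 2014 G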
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Utility functions for ``constants`` sub-package.""" import itertools __all__ = [] def _get_c(codata, iaudata, module, not_in_module_only=True): """ Generator to return a Constant object. Parameters ---------- codata, iaudata : obj Modules containing CODATA and IAU constants of interest. module : obj Namespace module of interest. not_in_module_only : bool If ``True``, ignore constants that are already in the namespace of ``module``. Returns ------- _c : Constant Constant object to process. """ from .constant import Constant for _nm, _c in itertools.chain( sorted(vars(codata).items()), sorted(vars(iaudata).items()) ): if not isinstance(_c, Constant): continue elif (not not_in_module_only) or (_c.abbrev not in module.__dict__): yield _c def _set_c( codata, iaudata, module, not_in_module_only=True, doclines=None, set_class=False ): """ Set constants in a given module namespace. Parameters ---------- codata, iaudata : obj Modules containing CODATA and IAU constants of interest. module : obj Namespace module to modify with the given ``codata`` and ``iaudata``. not_in_module_only : bool If ``True``, constants that are already in the namespace of ``module`` will not be modified. doclines : list or None If a list is given, this list will be modified in-place to include documentation of modified constants. This can be used to update docstring of ``module``. set_class : bool Namespace of ``module`` is populated with ``_c.__class__`` instead of just ``_c`` from :func:`_get_c`. """ for _c in _get_c(codata, iaudata, module, not_in_module_only=not_in_module_only): if set_class: value = _c.__class__( _c.abbrev, _c.name, _c.value, _c._unit_string, _c.uncertainty, _c.reference, ) else: value = _c setattr(module, _c.abbrev, value) if doclines is not None: doclines.append( "{:^10} {:^14.9g} {:^16} {}".format( _c.abbrev, _c.value, _c._unit_string, _c.name ) )
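# Usage sketch: _set_c populates an arbitrary namespace and can build the
# docstring table rows at the same time. The target module here is a
# throwaway ModuleType created purely for illustration:
import types

from astropy.constants import codata2018, iau2015
from astropy.constants import utils as _utils

ns = types.ModuleType("example_constants")  # hypothetical target namespace
rows = []
_utils._set_c(codata2018, iau2015, ns, doclines=rows)
print(ns.G)     # constants now live on the new module
print(rows[0])  # one formatted documentation row per constant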
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Astronomical and physics constants in cgs units. See :mod:`astropy.constants` for a complete listing of constants defined in Astropy. """ import itertools from .config import codata, iaudata from .constant import Constant for _nm, _c in itertools.chain( sorted(vars(codata).items()), sorted(vars(iaudata).items()) ): if ( isinstance(_c, Constant) and _c.abbrev not in locals() and _c.system in ["esu", "gauss", "emu"] ): locals()[_c.abbrev] = _c
# Licensed under a 3-clause BSD style license - see LICENSE.rst import functools import types import warnings import numpy as np from astropy.units.core import Unit, UnitsError from astropy.units.quantity import Quantity from astropy.utils import lazyproperty from astropy.utils.exceptions import AstropyUserWarning __all__ = ["Constant", "EMConstant"] class ConstantMeta(type): """Metaclass for `~astropy.constants.Constant`. The primary purpose of this is to wrap the double-underscore methods of `~astropy.units.Quantity` which is the superclass of `~astropy.constants.Constant`. In particular this wraps the operator overloads such as `__add__` to prevent their use with constants such as ``e`` from being used in expressions without specifying a system. The wrapper checks to see if the constant is listed (by name) in ``Constant._has_incompatible_units``, a set of those constants that are defined in different systems of units are physically incompatible. It also performs this check on each `Constant` if it hasn't already been performed (the check is deferred until the `Constant` is actually used in an expression to speed up import times, among other reasons). """ def __new__(mcls, name, bases, d): def wrap(meth): @functools.wraps(meth) def wrapper(self, *args, **kwargs): name_lower = self.name.lower() instances = self._registry[name_lower] if not self._checked_units: for inst in instances.values(): try: self.unit.to(inst.unit) except UnitsError: self._has_incompatible_units.add(name_lower) self._checked_units = True if not self.system and name_lower in self._has_incompatible_units: systems = sorted(x for x in instances if x) raise TypeError( f"Constant {self.abbrev!r} does not have physically compatible " "units across all systems of units and cannot be " "combined with other values without specifying a " f"system (eg. {self.abbrev}.{systems[0]})" ) return meth(self, *args, **kwargs) return wrapper # The wrapper applies to so many of the __ methods that it's easier to # just exclude the ones it doesn't apply to exclude = { "__new__", "__array_finalize__", "__array_wrap__", "__dir__", "__getattr__", "__init__", "__str__", "__repr__", "__hash__", "__iter__", "__getitem__", "__len__", "__bool__", "__quantity_subclass__", "__setstate__", } for attr, value in vars(Quantity).items(): if ( isinstance(value, types.FunctionType) and attr.startswith("__") and attr.endswith("__") and attr not in exclude ): d[attr] = wrap(value) return super().__new__(mcls, name, bases, d) class Constant(Quantity, metaclass=ConstantMeta): """A physical or astronomical constant. These objects are quantities that are meant to represent physical constants. Parameters ---------- abbrev : str A typical ASCII text abbreviation of the constant, generally the same as the Python variable used for this constant. name : str Full constant name. value : numbers.Real Constant value. Note that this should be a bare number, not a |Quantity|. unit : str String representation of the constant units. uncertainty : numbers.Real Absolute uncertainty in constant value. Note that this should be a bare number, not a |Quantity|. reference : str, optional Reference where the value is taken from. system : str System of units in which the constant is defined. This can be `None` when the constant's units can be directly converted between systems. 
""" _registry = {} _has_incompatible_units = set() def __new__( cls, abbrev, name, value, unit, uncertainty, reference=None, system=None ): if reference is None: reference = getattr(cls, "default_reference", None) if reference is None: raise TypeError(f"{cls} requires a reference.") name_lower = name.lower() instances = cls._registry.setdefault(name_lower, {}) # By-pass Quantity initialization, since units may not yet be # initialized here, and we store the unit in string form. inst = np.array(value).view(cls) if system in instances: warnings.warn( f"Constant {name!r} already has a definition in " f"the {system!r} system from {reference!r} reference", AstropyUserWarning, ) for c in instances.values(): if system is not None and not hasattr(c.__class__, system): setattr(c, system, inst) if c.system is not None and not hasattr(inst.__class__, c.system): setattr(inst, c.system, c) instances[system] = inst inst._abbrev = abbrev inst._name = name inst._value = value inst._unit_string = unit inst._uncertainty = uncertainty inst._reference = reference inst._system = system inst._checked_units = False return inst def __repr__(self): return ( f"<{self.__class__} " f"name={self.name!r} " f"value={self.value} " f"uncertainty={self.uncertainty} " f"unit={str(self.unit)!r} " f"reference={self.reference!r}>" ) def __str__(self): return ( f" Name = {self.name}\n" f" Value = {self.value}\n" f" Uncertainty = {self.uncertainty}\n" f" Unit = {self.unit}\n" f" Reference = {self.reference}" ) def __quantity_subclass__(self, unit): return super().__quantity_subclass__(unit)[0], False def copy(self): """ Return a copy of this `Constant` instance. Since they are by definition immutable, this merely returns another reference to ``self``. """ return self __deepcopy__ = __copy__ = copy @property def abbrev(self): """A typical ASCII text abbreviation of the constant, also generally the same as the Python variable used for this constant. """ return self._abbrev @property def name(self): """The full name of the constant.""" return self._name @lazyproperty def _unit(self): """The unit(s) in which this constant is defined.""" return Unit(self._unit_string) @property def uncertainty(self): """The known absolute uncertainty in this constant's value.""" return self._uncertainty @property def reference(self): """The source used for the value of this constant.""" return self._reference @property def system(self): """The system of units in which this constant is defined (typically `None` so long as the constant's units can be directly converted between systems). """ return self._system def _instance_or_super(self, key): instances = self._registry[self.name.lower()] inst = instances.get(key) if inst is not None: return inst else: return getattr(super(), key) @property def si(self): """If the Constant is defined in the SI system return that instance of the constant, else convert to a Quantity in the appropriate SI units. """ return self._instance_or_super("si") @property def cgs(self): """If the Constant is defined in the CGS system return that instance of the constant, else convert to a Quantity in the appropriate CGS units. 
""" return self._instance_or_super("cgs") def __array_finalize__(self, obj): for attr in ( "_abbrev", "_name", "_value", "_unit_string", "_uncertainty", "_reference", "_system", ): setattr(self, attr, getattr(obj, attr, None)) self._checked_units = getattr(obj, "_checked_units", False) class EMConstant(Constant): """An electromagnetic constant.""" @property def cgs(self): """Overridden for EMConstant to raise a `TypeError` emphasizing that there are multiple EM extensions to CGS. """ raise TypeError( "Cannot convert EM constants to cgs because there " "are different systems for E.M constants within the " "c.g.s system (ESU, Gaussian, etc.). Instead, " "directly use the constant with the appropriate " "suffix (e.g. e.esu, e.gauss, etc.)." )
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Astronomical and physics constants for Astropy v4.0. See :mod:`astropy.constants` for a complete listing of constants defined in Astropy. """ import warnings from astropy.utils import find_current_module from . import codata2018, iau2015 from . import utils as _utils codata = codata2018 iaudata = iau2015 _utils._set_c(codata, iaudata, find_current_module()) # Overwrite the following for consistency. # https://github.com/astropy/astropy/issues/8920 with warnings.catch_warnings(): warnings.filterwarnings("ignore", "Constant .*already has a definition") # Solar mass (derived from mass parameter and gravitational constant) M_sun = iau2015.IAU2015( "M_sun", "Solar mass", iau2015.GM_sun.value / codata2018.G.value, "kg", ( (codata2018.G.uncertainty / codata2018.G.value) * (iau2015.GM_sun.value / codata2018.G.value) ), f"IAU 2015 Resolution B 3 + {codata2018.G.reference}", system="si", ) # Jupiter mass (derived from mass parameter and gravitational constant) M_jup = iau2015.IAU2015( "M_jup", "Jupiter mass", iau2015.GM_jup.value / codata2018.G.value, "kg", ( (codata2018.G.uncertainty / codata2018.G.value) * (iau2015.GM_jup.value / codata2018.G.value) ), f"IAU 2015 Resolution B 3 + {codata2018.G.reference}", system="si", ) # Earth mass (derived from mass parameter and gravitational constant) M_earth = iau2015.IAU2015( "M_earth", "Earth mass", iau2015.GM_earth.value / codata2018.G.value, "kg", ( (codata2018.G.uncertainty / codata2018.G.value) * (iau2015.GM_earth.value / codata2018.G.value) ), f"IAU 2015 Resolution B 3 + {codata2018.G.reference}", system="si", ) # Clean up namespace del warnings del find_current_module del _utils
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Astronomical and physics constants in SI units. See :mod:`astropy.constants` for a complete listing of constants defined in Astropy. """ import itertools from .config import codata, iaudata from .constant import Constant for _nm, _c in itertools.chain( sorted(vars(codata).items()), sorted(vars(iaudata).items()) ): if isinstance(_c, Constant) and _c.abbrev not in locals() and _c.system == "si": locals()[_c.abbrev] = _c
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Astronomical and physics constants in SI units. See :mod:`astropy.constants` for a complete listing of constants defined in Astropy. """ import math from .constant import Constant, EMConstant # PHYSICAL CONSTANTS # https://en.wikipedia.org/wiki/2019_redefinition_of_SI_base_units class CODATA2018(Constant): default_reference = "CODATA 2018" _registry = {} _has_incompatible_units = set() class EMCODATA2018(CODATA2018, EMConstant): _registry = CODATA2018._registry h = CODATA2018("h", "Planck constant", 6.62607015e-34, "J s", 0.0, system="si") hbar = CODATA2018( "hbar", "Reduced Planck constant", h.value / (2 * math.pi), "J s", 0.0, system="si" ) k_B = CODATA2018("k_B", "Boltzmann constant", 1.380649e-23, "J / (K)", 0.0, system="si") c = CODATA2018( "c", "Speed of light in vacuum", 299792458.0, "m / (s)", 0.0, system="si" ) G = CODATA2018( "G", "Gravitational constant", 6.67430e-11, "m3 / (kg s2)", 0.00015e-11, system="si" ) g0 = CODATA2018( "g0", "Standard acceleration of gravity", 9.80665, "m / s2", 0.0, system="si" ) m_p = CODATA2018( "m_p", "Proton mass", 1.67262192369e-27, "kg", 0.00000000051e-27, system="si" ) m_n = CODATA2018( "m_n", "Neutron mass", 1.67492749804e-27, "kg", 0.00000000095e-27, system="si" ) m_e = CODATA2018( "m_e", "Electron mass", 9.1093837015e-31, "kg", 0.0000000028e-31, system="si" ) u = CODATA2018( "u", "Atomic mass", 1.66053906660e-27, "kg", 0.00000000050e-27, system="si" ) sigma_sb = CODATA2018( "sigma_sb", "Stefan-Boltzmann constant", 2 * math.pi**5 * k_B.value**4 / (15 * h.value**3 * c.value**2), "W / (K4 m2)", 0.0, system="si", ) e = EMCODATA2018("e", "Electron charge", 1.602176634e-19, "C", 0.0, system="si") eps0 = EMCODATA2018( "eps0", "Vacuum electric permittivity", 8.8541878128e-12, "F/m", 0.0000000013e-12, system="si", ) N_A = CODATA2018( "N_A", "Avogadro's number", 6.02214076e23, "1 / (mol)", 0.0, system="si" ) R = CODATA2018( "R", "Gas constant", k_B.value * N_A.value, "J / (K mol)", 0.0, system="si" ) Ryd = CODATA2018( "Ryd", "Rydberg constant", 10973731.568160, "1 / (m)", 0.000021, system="si" ) a0 = CODATA2018( "a0", "Bohr radius", 5.29177210903e-11, "m", 0.00000000080e-11, system="si" ) muB = CODATA2018( "muB", "Bohr magneton", 9.2740100783e-24, "J/T", 0.0000000028e-24, system="si" ) alpha = CODATA2018( "alpha", "Fine-structure constant", 7.2973525693e-3, "", 0.0000000011e-3, system="si", ) atm = CODATA2018("atm", "Standard atmosphere", 101325, "Pa", 0.0, system="si") mu0 = CODATA2018( "mu0", "Vacuum magnetic permeability", 1.25663706212e-6, "N/A2", 0.00000000019e-6, system="si", ) sigma_T = CODATA2018( "sigma_T", "Thomson scattering cross-section", 6.6524587321e-29, "m2", 0.0000000060e-29, system="si", ) # Formula taken from NIST wall chart. # The numerical factor is from a numerical solution to the equation for the # maximum. See https://en.wikipedia.org/wiki/Wien%27s_displacement_law b_wien = CODATA2018( "b_wien", "Wien wavelength displacement law constant", h.value * c.value / (k_B.value * 4.965114231744276), "m K", 0.0, system="si", ) # CGS constants. # Only constants that cannot be converted directly from S.I. are defined here. # Because both e and c are exact, these are also exact by definition. e_esu = EMCODATA2018( e.abbrev, e.name, e.value * c.value * 10.0, "statC", 0.0, system="esu" ) e_emu = EMCODATA2018(e.abbrev, e.name, e.value / 10, "abC", 0.0, system="emu") e_gauss = EMCODATA2018( e.abbrev, e.name, e.value * c.value * 10.0, "Fr", 0.0, system="gauss" )
# Licensed under a 3-clause BSD style license - see LICENSE.rst from astropy.cosmology import units as cu from astropy.io import registry as io_registry from astropy.units import add_enabled_units __all__ = [ "CosmologyRead", "CosmologyWrite", "CosmologyFromFormat", "CosmologyToFormat", ] __doctest_skip__ = __all__ # ============================================================================== # Read / Write readwrite_registry = io_registry.UnifiedIORegistry() class CosmologyRead(io_registry.UnifiedReadWrite): """Read and parse data to a `~astropy.cosmology.Cosmology`. This function provides the Cosmology interface to the Astropy unified I/O layer. This allows easily reading a file in supported data formats using syntax such as:: >>> from astropy.cosmology import Cosmology >>> cosmo1 = Cosmology.read('<file name>') When the ``read`` method is called from a subclass the subclass will provide a keyword argument ``cosmology=<class>`` to the registered read method. The method uses this cosmology class, regardless of the class indicated in the file, and sets parameters' default values from the class' signature. Get help on the available readers using the ``help()`` method:: >>> Cosmology.read.help() # Get help reading and list supported formats >>> Cosmology.read.help(format='<format>') # Get detailed help on a format >>> Cosmology.read.list_formats() # Print list of available formats See also: https://docs.astropy.org/en/stable/io/unified.html Parameters ---------- *args Positional arguments passed through to data reader. If supplied the first argument is typically the input filename. format : str (optional, keyword-only) File format specifier. **kwargs Keyword arguments passed through to data reader. Returns ------- out : `~astropy.cosmology.Cosmology` subclass instance `~astropy.cosmology.Cosmology` corresponding to file contents. Notes ----- """ def __init__(self, instance, cosmo_cls): super().__init__(instance, cosmo_cls, "read", registry=readwrite_registry) def __call__(self, *args, **kwargs): from astropy.cosmology.core import Cosmology # so subclasses can override, also pass the class as a kwarg. # allows for `FlatLambdaCDM.read` and # `Cosmology.read(..., cosmology=FlatLambdaCDM)` if self._cls is not Cosmology: kwargs.setdefault("cosmology", self._cls) # set, if not present # check that it is the correct cosmology, can be wrong if user # passes in e.g. `w0wzCDM.read(..., cosmology=FlatLambdaCDM)` valid = (self._cls, self._cls.__qualname__) if kwargs["cosmology"] not in valid: raise ValueError( "keyword argument `cosmology` must be either the class " f"{valid[0]} or its qualified name '{valid[1]}'" ) with add_enabled_units(cu): cosmo = self.registry.read(self._cls, *args, **kwargs) return cosmo class CosmologyWrite(io_registry.UnifiedReadWrite): """Write this Cosmology object out in the specified format. This function provides the Cosmology interface to the astropy unified I/O layer. This allows easily writing a file in supported data formats using syntax such as:: >>> from astropy.cosmology import Planck18 >>> Planck18.write('<file name>') Get help on the available writers for ``Cosmology`` using the ``help()`` method:: >>> Cosmology.write.help() # Get help writing and list supported formats >>> Cosmology.write.help(format='<format>') # Get detailed help on format >>> Cosmology.write.list_formats() # Print list of available formats Parameters ---------- *args Positional arguments passed through to data writer. If supplied the first argument is the output filename. 
format : str (optional, keyword-only) File format specifier. **kwargs Keyword arguments passed through to data writer. Notes ----- """ def __init__(self, instance, cls): super().__init__(instance, cls, "write", registry=readwrite_registry) def __call__(self, *args, **kwargs): self.registry.write(self._instance, *args, **kwargs) # ============================================================================== # Format Interchange # for transforming instances, e.g. Cosmology <-> dict convert_registry = io_registry.UnifiedIORegistry() class CosmologyFromFormat(io_registry.UnifiedReadWrite): """Transform object to a `~astropy.cosmology.Cosmology`. This function provides the Cosmology interface to the Astropy unified I/O layer. This allows easily parsing supported data formats using syntax such as:: >>> from astropy.cosmology import Cosmology >>> cosmo1 = Cosmology.from_format(cosmo_mapping, format='mapping') When the ``from_format`` method is called from a subclass the subclass will provide a keyword argument ``cosmology=<class>`` to the registered parser. The method uses this cosmology class, regardless of the class indicated in the data, and sets parameters' default values from the class' signature. Get help on the available readers using the ``help()`` method:: >>> Cosmology.from_format.help() # Get help and list supported formats >>> Cosmology.from_format.help('<format>') # Get detailed help on a format >>> Cosmology.from_format.list_formats() # Print list of available formats See also: https://docs.astropy.org/en/stable/io/unified.html Parameters ---------- obj : object The object to parse according to 'format' *args Positional arguments passed through to data parser. format : str or None, optional keyword-only Object format specifier. For `None` (default) CosmologyFromFormat tries to identify the correct format. **kwargs Keyword arguments passed through to data parser. Parsers should accept the following keyword arguments: - cosmology : the class (or string name thereof) to use / check when constructing the cosmology instance. Returns ------- out : `~astropy.cosmology.Cosmology` subclass instance `~astropy.cosmology.Cosmology` corresponding to ``obj`` contents. """ def __init__(self, instance, cosmo_cls): super().__init__(instance, cosmo_cls, "read", registry=convert_registry) def __call__(self, obj, *args, format=None, **kwargs): from astropy.cosmology.core import Cosmology # so subclasses can override, also pass the class as a kwarg. # allows for `FlatLambdaCDM.read` and # `Cosmology.read(..., cosmology=FlatLambdaCDM)` if self._cls is not Cosmology: kwargs.setdefault("cosmology", self._cls) # set, if not present # check that it is the correct cosmology, can be wrong if user # passes in e.g. `w0wzCDM.read(..., cosmology=FlatLambdaCDM)` valid = (self._cls, self._cls.__qualname__) if kwargs["cosmology"] not in valid: raise ValueError( "keyword argument `cosmology` must be either the class " f"{valid[0]} or its qualified name '{valid[1]}'" ) with add_enabled_units(cu): cosmo = self.registry.read(self._cls, obj, *args, format=format, **kwargs) return cosmo class CosmologyToFormat(io_registry.UnifiedReadWrite): """Transform this Cosmology to another format. This function provides the Cosmology interface to the astropy unified I/O layer. 
This allows easily transforming to supported data formats using syntax such as:: >>> from astropy.cosmology import Planck18 >>> Planck18.to_format("mapping") {'cosmology': astropy.cosmology.core.FlatLambdaCDM, 'name': 'Planck18', 'H0': <Quantity 67.66 km / (Mpc s)>, 'Om0': 0.30966, ... Get help on the available representations for ``Cosmology`` using the ``help()`` method:: >>> Cosmology.to_format.help() # Get help and list supported formats >>> Cosmology.to_format.help('<format>') # Get detailed help on format >>> Cosmology.to_format.list_formats() # Print list of available formats Parameters ---------- format : str Format specifier. *args Positional arguments passed through to data writer. If supplied the first argument is the output filename. **kwargs Keyword arguments passed through to data writer. """ def __init__(self, instance, cls): super().__init__(instance, cls, "write", registry=convert_registry) def __call__(self, format, *args, **kwargs): return self.registry.write(self._instance, None, *args, format=format, **kwargs)
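# Usage sketch: the two registries above drive both file I/O and in-memory
# format interchange. Round-tripping through the built-in 'mapping' format:
from astropy.cosmology import Cosmology, Planck18

m = Planck18.to_format("mapping")                   # plain dict
cosmo = Cosmology.from_format(m, format="mapping")  # back to a cosmology
assert cosmo == Planck18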
# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import annotations import abc import inspect from typing import TYPE_CHECKING, Any, Mapping, TypeVar import numpy as np from astropy.io.registry import UnifiedReadWriteMethod from astropy.utils.decorators import classproperty from astropy.utils.metadata import MetaData from .connect import ( CosmologyFromFormat, CosmologyRead, CosmologyToFormat, CosmologyWrite, ) from .parameter import Parameter if TYPE_CHECKING: # pragma: no cover from astropy.cosmology.funcs.comparison import _FormatType # Originally authored by Andrew Becker ([email protected]), # and modified by Neil Crighton ([email protected]), Roban Kramer # ([email protected]), and Nathaniel Starkman ([email protected]). # Many of these adapted from Hogg 1999, astro-ph/9905116 # and Linder 2003, PRL 90, 91301 __all__ = ["Cosmology", "CosmologyError", "FlatCosmologyMixin"] __doctest_requires__ = {} # needed until __getattr__ removed ############################################################################## # Parameters # registry of cosmology classes with {key=name : value=class} _COSMOLOGY_CLASSES = dict() # typing _CosmoT = TypeVar("_CosmoT", bound="Cosmology") _FlatCosmoT = TypeVar("_FlatCosmoT", bound="FlatCosmologyMixin") ############################################################################## class CosmologyError(Exception): pass class Cosmology(metaclass=abc.ABCMeta): """Base-class for all Cosmologies. Parameters ---------- *args Arguments into the cosmology; used by subclasses, not this base class. name : str or None (optional, keyword-only) The name of the cosmology. meta : dict or None (optional, keyword-only) Metadata for the cosmology, e.g., a reference. **kwargs Arguments into the cosmology; used by subclasses, not this base class. Notes ----- Class instances are static -- you cannot (and should not) change the values of the parameters. That is, all of the above attributes (except meta) are read only. For details on how to create performant custom subclasses, see the documentation on :ref:`astropy-cosmology-fast-integrals`. """ meta = MetaData() # Unified I/O object interchange methods from_format = UnifiedReadWriteMethod(CosmologyFromFormat) to_format = UnifiedReadWriteMethod(CosmologyToFormat) # Unified I/O read and write methods read = UnifiedReadWriteMethod(CosmologyRead) write = UnifiedReadWriteMethod(CosmologyWrite) # Parameters __parameters__: tuple[str, ...] = () __all_parameters__: tuple[str, ...] = () # --------------------------------------------------------------- def __init_subclass__(cls): super().__init_subclass__() # ------------------- # Parameters # Get parameters that are still Parameters, either in this class or above. 
parameters = [] derived_parameters = [] for n in cls.__parameters__: p = getattr(cls, n) if isinstance(p, Parameter): derived_parameters.append(n) if p.derived else parameters.append(n) # Add new parameter definitions for n, v in cls.__dict__.items(): if n in parameters or n.startswith("_") or not isinstance(v, Parameter): continue derived_parameters.append(n) if v.derived else parameters.append(n) # reorder to match signature ordered = [ parameters.pop(parameters.index(n)) for n in cls._init_signature.parameters.keys() if n in parameters ] parameters = ordered + parameters # place "unordered" at the end cls.__parameters__ = tuple(parameters) cls.__all_parameters__ = cls.__parameters__ + tuple(derived_parameters) # ------------------- # register as a Cosmology subclass _COSMOLOGY_CLASSES[cls.__qualname__] = cls @classproperty(lazy=True) def _init_signature(cls): """Initialization signature (without 'self').""" # get signature, dropping "self" by taking arguments [1:] sig = inspect.signature(cls.__init__) sig = sig.replace(parameters=list(sig.parameters.values())[1:]) return sig # --------------------------------------------------------------- def __init__(self, name=None, meta=None): self._name = str(name) if name is not None else name self.meta.update(meta or {}) @property def name(self): """The name of the Cosmology instance.""" return self._name @property @abc.abstractmethod def is_flat(self): """ Return bool; `True` if the cosmology is flat. This is abstract and must be defined in subclasses. """ raise NotImplementedError("is_flat is not implemented") def clone(self, *, meta=None, **kwargs): """Returns a copy of this object with updated parameters, as specified. This cannot be used to change the type of the cosmology, so ``clone()`` cannot be used to change between flat and non-flat cosmologies. Parameters ---------- meta : mapping or None (optional, keyword-only) Metadata that will update the current metadata. **kwargs Cosmology parameter (and name) modifications. If any parameter is changed and a new name is not given, the name will be set to "[old name] (modified)". Returns ------- newcosmo : `~astropy.cosmology.Cosmology` subclass instance A new instance of this class with updated parameters as specified. If no arguments are given, then a reference to this object is returned instead of copy. Examples -------- To make a copy of the ``Planck13`` cosmology with a different matter density (``Om0``), and a new name: >>> from astropy.cosmology import Planck13 >>> Planck13.clone(name="Modified Planck 2013", Om0=0.35) FlatLambdaCDM(name="Modified Planck 2013", H0=67.77 km / (Mpc s), Om0=0.35, ... If no name is specified, the new name will note the modification. >>> Planck13.clone(Om0=0.35).name 'Planck13 (modified)' """ # Quick return check, taking advantage of the Cosmology immutability. if meta is None and not kwargs: return self # There are changed parameter or metadata values. # The name needs to be changed accordingly, if it wasn't already. _modname = self.name + " (modified)" kwargs.setdefault("name", (_modname if self.name is not None else None)) # mix new meta into existing, preferring the former. meta = meta if meta is not None else {} new_meta = {**self.meta, **meta} # Mix kwargs into initial arguments, preferring the former. new_init = {**self._init_arguments, "meta": new_meta, **kwargs} # Create BoundArgument to handle args versus kwargs. 
# This also handles all errors from mismatched arguments ba = self._init_signature.bind_partial(**new_init) # Instantiate, respecting args vs kwargs cloned = type(self)(*ba.args, **ba.kwargs) # Check if nothing has changed. # TODO! or should return self? if (cloned.name == _modname) and not meta and cloned.is_equivalent(self): cloned._name = self.name return cloned @property def _init_arguments(self): # parameters kw = {n: getattr(self, n) for n in self.__parameters__} # other info kw["name"] = self.name kw["meta"] = self.meta return kw # --------------------------------------------------------------- # comparison methods def is_equivalent(self, other: Any, /, *, format: _FormatType = False) -> bool: r"""Check equivalence between Cosmologies. Two cosmologies may be equivalent even if not the same class. For example, an instance of ``LambdaCDM`` might have :math:`\Omega_0=1` and :math:`\Omega_k=0` and therefore be flat, like ``FlatLambdaCDM``. Parameters ---------- other : `~astropy.cosmology.Cosmology` subclass instance, positional-only The object to which to compare. format : bool or None or str, optional keyword-only Whether to allow, before equivalence is checked, the object to be converted to a |Cosmology|. This allows, e.g. a |Table| to be equivalent to a Cosmology. `False` (default) will not allow conversion. `True` or `None` will, and will use the auto-identification to try to infer the correct format. A `str` is assumed to be the correct format to use when converting. ``format`` is broadcast to match the shape of ``other``. Note that the cosmology arguments are not broadcast against ``format``, so it cannot determine the output shape. Returns ------- bool True if cosmologies are equivalent, False otherwise. Examples -------- Two cosmologies may be equivalent even if not of the same class. In this examples the ``LambdaCDM`` has ``Ode0`` set to the same value calculated in ``FlatLambdaCDM``. >>> import astropy.units as u >>> from astropy.cosmology import LambdaCDM, FlatLambdaCDM >>> cosmo1 = LambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, 0.7) >>> cosmo2 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3) >>> cosmo1.is_equivalent(cosmo2) True While in this example, the cosmologies are not equivalent. >>> cosmo3 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, Tcmb0=3 * u.K) >>> cosmo3.is_equivalent(cosmo2) False Also, using the keyword argument, the notion of equivalence is extended to any Python object that can be converted to a |Cosmology|. >>> from astropy.cosmology import Planck18 >>> tbl = Planck18.to_format("astropy.table") >>> Planck18.is_equivalent(tbl, format=True) True The list of valid formats, e.g. the |Table| in this example, may be checked with ``Cosmology.from_format.list_formats()``. As can be seen in the list of formats, not all formats can be auto-identified by ``Cosmology.from_format.registry``. Objects of these kinds can still be checked for equivalence, but the correct format string must be used. >>> tbl = Planck18.to_format("yaml") >>> Planck18.is_equivalent(tbl, format="yaml") True """ from .funcs import cosmology_equal try: return cosmology_equal( self, other, format=(None, format), allow_equivalent=True ) except Exception: # `is_equivalent` allows `other` to be any object and returns False # if `other` cannot be converted to a Cosmology, rather than # raising an Exception. return False def __equiv__(self, other: Any, /) -> bool: """Cosmology equivalence. Use ``.is_equivalent()`` for actual check! 
Parameters ---------- other : `~astropy.cosmology.Cosmology` subclass instance, positional-only The object in which to compare. Returns ------- bool or `NotImplemented` `NotImplemented` if ``other`` is from a different class. `True` if ``other`` is of the same class and has matching parameters and parameter values. `False` otherwise. """ if other.__class__ is not self.__class__: return NotImplemented # allows other.__equiv__ # Check all parameters in 'other' match those in 'self' and 'other' has # no extra parameters (latter part should never happen b/c same class) return set(self.__all_parameters__) == set(other.__all_parameters__) and all( np.all(getattr(self, k) == getattr(other, k)) for k in self.__all_parameters__ ) def __eq__(self, other: Any, /) -> bool: """Check equality between Cosmologies. Checks the Parameters and immutable fields (i.e. not "meta"). Parameters ---------- other : `~astropy.cosmology.Cosmology` subclass instance, positional-only The object in which to compare. Returns ------- bool `True` if Parameters and names are the same, `False` otherwise. """ if other.__class__ is not self.__class__: return NotImplemented # allows other.__eq__ eq = ( # non-Parameter checks: name self.name == other.name # check all parameters in 'other' match those in 'self' and 'other' # has no extra parameters (latter part should never happen b/c same # class) TODO! element-wise when there are array cosmologies and set(self.__all_parameters__) == set(other.__all_parameters__) and all( np.all(getattr(self, k) == getattr(other, k)) for k in self.__all_parameters__ ) ) return eq # --------------------------------------------------------------- def __repr__(self): namelead = f"{self.__class__.__qualname__}(" if self.name is not None: namelead += f'name="{self.name}", ' # nicely formatted parameters fmtps = (f"{k}={getattr(self, k)}" for k in self.__parameters__) return namelead + ", ".join(fmtps) + ")" def __astropy_table__(self, cls, copy, **kwargs): """Return a `~astropy.table.Table` of type ``cls``. Parameters ---------- cls : type Astropy ``Table`` class or subclass. copy : bool Ignored. **kwargs : dict, optional Additional keyword arguments. Passed to ``self.to_format()``. See ``Cosmology.to_format.help("astropy.table")`` for allowed kwargs. Returns ------- `astropy.table.Table` or subclass instance Instance of type ``cls``. """ return self.to_format("astropy.table", cls=cls, **kwargs) class FlatCosmologyMixin(metaclass=abc.ABCMeta): """ Mixin class for flat cosmologies. Do NOT instantiate directly. Note that all instances of ``FlatCosmologyMixin`` are flat, but not all flat cosmologies are instances of ``FlatCosmologyMixin``. As example, ``LambdaCDM`` **may** be flat (for the a specific set of parameter values), but ``FlatLambdaCDM`` **will** be flat. """ __all_parameters__: tuple[str, ...] __parameters__: tuple[str, ...] def __init_subclass__(cls: type[_FlatCosmoT]) -> None: super().__init_subclass__() # Determine the non-flat class. # This will raise a TypeError if the MRO is inconsistent. cls.__nonflatclass__ # =============================================================== @classmethod # TODO! make metaclass-method def _get_nonflat_cls( cls, kls: type[_CosmoT] | None = None ) -> type[Cosmology] | None: """Find the corresponding non-flat class. The class' bases are searched recursively. Parameters ---------- kls : :class:`astropy.cosmology.Cosmology` class or None, optional If `None` (default) this class is searched instead of `kls`. 
Raises ------ TypeError If more than one non-flat class is found at the same level of the inheritance. This is similar to the error normally raised by Python for an inconsistent method resolution order. Returns ------- type A :class:`Cosmology` subclass this class inherits from that is not a :class:`FlatCosmologyMixin` subclass. """ _kls = cls if kls is None else kls # Find non-flat classes nonflat: set[type[Cosmology]] nonflat = { b for b in _kls.__bases__ if issubclass(b, Cosmology) and not issubclass(b, FlatCosmologyMixin) } if not nonflat: # e.g. subclassing FlatLambdaCDM nonflat = { k for b in _kls.__bases__ if (k := cls._get_nonflat_cls(b)) is not None } if len(nonflat) > 1: raise TypeError( "cannot create a consistent non-flat class resolution order " f"for {_kls} with bases {nonflat} at the same inheritance level." ) if not nonflat: # e.g. FlatFLRWMixin(FlatCosmologyMixin) return None return nonflat.pop() __nonflatclass__ = classproperty( _get_nonflat_cls, lazy=True, doc="Return the corresponding non-flat class." ) # =============================================================== @property def is_flat(self): """Return `True`, the cosmology is flat.""" return True @abc.abstractmethod def nonflat(self: _FlatCosmoT) -> _CosmoT: """Return the equivalent non-flat-class instance of this cosmology.""" def clone(self, *, meta: Mapping | None = None, to_nonflat: bool = False, **kwargs): """Returns a copy of this object with updated parameters, as specified. This cannot be used to change the type of the cosmology, except for changing to the non-flat version of this cosmology. Parameters ---------- meta : mapping or None (optional, keyword-only) Metadata that will update the current metadata. to_nonflat : bool, optional keyword-only Whether to change to the non-flat version of this cosmology. **kwargs Cosmology parameter (and name) modifications. If any parameter is changed and a new name is not given, the name will be set to "[old name] (modified)". Returns ------- newcosmo : `~astropy.cosmology.Cosmology` subclass instance A new instance of this class with updated parameters as specified. If no arguments are given, then a reference to this object is returned instead of copy. Examples -------- To make a copy of the ``Planck13`` cosmology with a different matter density (``Om0``), and a new name: >>> from astropy.cosmology import Planck13 >>> Planck13.clone(name="Modified Planck 2013", Om0=0.35) FlatLambdaCDM(name="Modified Planck 2013", H0=67.77 km / (Mpc s), Om0=0.35, ... If no name is specified, the new name will note the modification. >>> Planck13.clone(Om0=0.35).name 'Planck13 (modified)' The keyword 'to_nonflat' can be used to clone on the non-flat equivalent cosmology. >>> Planck13.clone(to_nonflat=True) LambdaCDM(name="Planck13", ... >>> Planck13.clone(H0=70, to_nonflat=True) LambdaCDM(name="Planck13 (modified)", H0=70.0 km / (Mpc s), ... """ if to_nonflat: return self.nonflat.clone(meta=meta, **kwargs) return super().clone(meta=meta, **kwargs) # =============================================================== def __equiv__(self, other): """flat-|Cosmology| equivalence. Use `astropy.cosmology.funcs.cosmology_equal` with ``allow_equivalent=True`` for actual checks! Parameters ---------- other : `~astropy.cosmology.Cosmology` subclass instance The object to which to compare for equivalence. Returns ------- bool or `NotImplemented` `True` if ``other`` is of the same class / non-flat class (e.g. |FlatLambdaCDM| and |LambdaCDM|) has matching parameters and parameter values. 
`False` if ``other`` is of the same class but has different parameters. `NotImplemented` otherwise. """ if isinstance(other, FlatCosmologyMixin): return super().__equiv__(other) # super gets from Cosmology # Check if `other` is the non-flat version of this class. This makes the # assumption that any further subclass of a flat cosmo keeps the same # physics. if not issubclass(other.__class__, self.__nonflatclass__): return NotImplemented # Check if they have equivalent parameters: all parameters in `other` # match those in `self` and `other` has no extra parameters. params_eq = ( set(self.__all_parameters__) == set(other.__all_parameters__) # no extra # equal and all( np.all(getattr(self, k) == getattr(other, k)) for k in self.__parameters__ ) # flatness check and other.is_flat ) return params_eq # ----------------------------------------------------------------------------- def __getattr__(attr): from . import flrw if hasattr(flrw, attr) and attr not in ("__path__",): import warnings from astropy.utils.exceptions import AstropyDeprecationWarning warnings.warn( f"`astropy.cosmology.core.{attr}` has been moved (since v5.0) and " f"should be imported as ``from astropy.cosmology import {attr}``." " In future this will raise an exception.", AstropyDeprecationWarning, ) return getattr(flrw, attr) raise AttributeError(f"module {__name__!r} has no attribute {attr!r}.")
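# Usage sketch of the equality semantics above: ``__eq__`` requires the exact
# class, name, and parameter values to match, while ``cosmology_equal`` with
# ``allow_equivalent=True`` also accepts the flat/non-flat pairing handled by
# ``__equiv__``. Illustrative only; ``cosmology_equal`` is the public entry
# point mentioned in the ``__equiv__`` docstrings.
def _equality_example():
    from astropy.cosmology import FlatLambdaCDM, LambdaCDM, cosmology_equal

    flat = FlatLambdaCDM(H0=70, Om0=0.3, name="demo")
    curved = LambdaCDM(H0=70, Om0=0.3, Ode0=0.7, name="demo")  # Otot0 == 1
    assert flat != curved  # different classes, so not __eq__-equal
    assert cosmology_equal(flat, curved, allow_equivalent=True)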
b69028611483c8b5e56a1b458e9da03568246e336b89d9768395d38187dfaef5
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ astropy.cosmology contains classes and functions for cosmological distance measures and other cosmology-related calculations. See the `Astropy documentation <https://docs.astropy.org/en/latest/cosmology/index.html>`_ for more detailed usage examples and references. """ from . import core, flrw, funcs, parameter, units, utils from . import io # needed before 'realizations' # isort: split from . import realizations from .core import * from .flrw import * from .funcs import * from .parameter import * from .realizations import available, default_cosmology from .utils import * __all__ = ( core.__all__ + flrw.__all__ # cosmology classes + realizations.__all__ # instances thereof + ["units"] # utils + funcs.__all__ + parameter.__all__ + utils.__all__ ) def __getattr__(name): """Get realizations using lazy import from `PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_. Raises ------ AttributeError If "name" is not in :mod:`astropy.cosmology.realizations` """ if name not in available: raise AttributeError(f"module {__name__!r} has no attribute {name!r}.") return getattr(realizations, name) def __dir__(): """Directory, including lazily-imported objects.""" return __all__
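# Usage sketch of the PEP 562 lazy loading above: realization names are listed
# in ``available`` eagerly, but each instance is only built on first attribute
# access via ``__getattr__``.
def _lazy_loading_example():
    import astropy.cosmology as cosmology

    assert "Planck18" in cosmology.available
    assert cosmology.Planck18.name == "Planck18"  # triggers __getattr__ once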
632549413dfbe7c3b64a2a5c2b28d923fcb8d40c035125cc2c9b85ee56306ed1
# Licensed under a 3-clause BSD style license - see LICENSE.rst import copy import astropy.units as u from astropy.utils.decorators import deprecated_attribute, deprecated_renamed_argument __all__ = ["Parameter"] class Parameter: r"""Cosmological parameter (descriptor). Should only be used with a :class:`~astropy.cosmology.Cosmology` subclass. Parameters ---------- derived : bool (optional, keyword-only) Whether the Parameter is 'derived', default `False`. Derived parameters behave similarly to normal parameters, but are not sorted by the |Cosmology| signature (they generally do not appear in it) and are not included in all methods. For reference, see ``Ode0`` in ``FlatFLRWMixin``, which removes :math:`\Omega_{de,0}` as an independent parameter (:math:`\Omega_{de,0} \equiv 1 - \Omega_{tot}`). unit : unit-like or None (optional, keyword-only) The `~astropy.units.Unit` for the Parameter. If None (default), no unit is assumed. equivalencies : `~astropy.units.Equivalency` or sequence thereof Unit equivalencies for this Parameter. fvalidate : callable[[object, object, Any], Any] or str (optional, keyword-only) Function to validate the Parameter value from instances of the cosmology class. If "default", uses default validator to assign units (with equivalencies), if Parameter has units. For other valid string options, see ``Parameter._registry_validators``. 'fvalidate' can also be set through a decorator with :meth:`~astropy.cosmology.Parameter.validator`. fmt : str (optional, keyword-only) `format` specification, used when making the string representation of the containing Cosmology. See https://docs.python.org/3/library/string.html#formatspec .. deprecated:: 5.1 doc : str or None (optional, keyword-only) Parameter description. Examples -------- For worked examples see :class:`~astropy.cosmology.FLRW`. """ _registry_validators = {} @deprecated_renamed_argument("fmt", None, since="5.1") def __init__( self, *, derived=False, unit=None, equivalencies=[], fvalidate="default", fmt="", doc=None, ): # attribute name on container cosmology class. # really set in __set_name__, but if Parameter is not init'ed as a # descriptor this ensures that the attributes exist. self._attr_name = self._attr_name_private = None self._derived = derived self._format_spec = str(fmt) # deprecated. self.__doc__ = doc # units stuff self._unit = u.Unit(unit) if unit is not None else None self._equivalencies = equivalencies # Parse registered `fvalidate` self._fvalidate_in = fvalidate # Always store input fvalidate.
if callable(fvalidate): pass elif fvalidate in self._registry_validators: fvalidate = self._registry_validators[fvalidate] elif isinstance(fvalidate, str): raise ValueError( f"`fvalidate`, if str, must be in {self._registry_validators.keys()}" ) else: raise TypeError( f"`fvalidate` must be a function or {self._registry_validators.keys()}" ) self._fvalidate = fvalidate def __set_name__(self, cosmo_cls, name): # attribute name on container cosmology class self._attr_name = name self._attr_name_private = "_" + name @property def name(self): """Parameter name.""" return self._attr_name @property def unit(self): """Parameter unit.""" return self._unit @property def equivalencies(self): """Equivalencies used when initializing Parameter.""" return self._equivalencies format_spec = deprecated_attribute("format_spec", since="5.1") @property def derived(self): """Whether the Parameter is derived; true parameters are not.""" return self._derived # ------------------------------------------- # descriptor and property-like methods def __get__(self, cosmology, cosmo_cls=None): # Get from class if cosmology is None: return self # Get from instance return getattr(cosmology, self._attr_name_private) def __set__(self, cosmology, value): """Allows attribute setting once. Raises AttributeError subsequently.""" # Raise error if setting 2nd time. if hasattr(cosmology, self._attr_name_private): raise AttributeError(f"can't set attribute {self._attr_name} again") # Validate value, generally setting units if present value = self.validate(cosmology, copy.deepcopy(value)) # Make the value read-only, if ndarray-like if hasattr(value, "setflags"): value.setflags(write=False) # Set the value on the cosmology setattr(cosmology, self._attr_name_private, value) # ------------------------------------------- # validate value @property def fvalidate(self): """Function to validate a potential value of this Parameter.""" return self._fvalidate def validator(self, fvalidate): """Make new Parameter with custom ``fvalidate``. Note: ``Parameter.validator`` must be the top-most descriptor decorator. Parameters ---------- fvalidate : callable[[type, type, Any], Any] Returns ------- `~astropy.cosmology.Parameter` Copy of this Parameter but with custom ``fvalidate``. """ return self.clone(fvalidate=fvalidate) def validate(self, cosmology, value): """Run the validator on this Parameter. Parameters ---------- cosmology : `~astropy.cosmology.Cosmology` instance value : Any The object to validate. Returns ------- Any The output of calling ``fvalidate(cosmology, self, value)`` (yes, that parameter order). """ return self.fvalidate(cosmology, self, value) @classmethod def register_validator(cls, key, fvalidate=None): """Decorator to register a new kind of validator function. Parameters ---------- key : str fvalidate : callable[[object, object, Any], Any] or None, optional Value validation function. Returns ------- ``validator`` or callable[``validator``] If ``fvalidate`` is None, returns a function that takes and registers a validator. This allows ``register_validator`` to be used as a decorator. """ if key in cls._registry_validators: raise KeyError(f"validator {key!r} already registered with Parameter.") # fvalidate directly passed if fvalidate is not None: cls._registry_validators[key] = fvalidate return fvalidate # for use as a decorator def register(fvalidate): """Register validator function. Parameters ---------- fvalidate : callable[[object, object, Any], Any] Validation function.
Returns ------- ``validator`` """ cls._registry_validators[key] = fvalidate return fvalidate return register # ------------------------------------------- def _get_init_arguments(self, processed=False): """Initialization arguments. Parameters ---------- processed : bool Whether to more closely reproduce the input arguments (`False`, default) or the processed arguments (`True`). The former is better for string representations and round-tripping with ``eval(repr())``. Returns ------- dict[str, Any] """ # The keys are added in this order because `repr` prints them in order. kw = { "derived": self.derived, "unit": self.unit, "equivalencies": self.equivalencies, # Validator is always turned into a function, but for ``repr`` it's nice # to know if it was originally a string. "fvalidate": self.fvalidate if processed else self._fvalidate_in, "doc": self.__doc__, } # fmt will issue a deprecation warning if passed, so it is only passed if # it's not the default. if self._format_spec: kw["fmt"] = self._format_spec return kw def clone(self, **kw): """Clone this `Parameter`, changing any constructor argument. Parameters ---------- **kw Passed to constructor. The current values, e.g. ``fvalidate``, are used as the default values, so an empty ``**kw`` is an exact copy. Examples -------- >>> p = Parameter() >>> p Parameter(derived=False, unit=None, equivalencies=[], fvalidate='default', doc=None) >>> p.clone(unit="km") Parameter(derived=False, unit=Unit("km"), equivalencies=[], fvalidate='default', doc=None) """ # Start with defaults, update from kw. kwargs = {**self._get_init_arguments(), **kw} # All initialization failures, like incorrect input, are handled by init cloned = type(self)(**kwargs) # Transfer over the __set_name__ stuff. If `clone` is used to make a # new descriptor, __set_name__ will be called again, overwriting this. cloned._attr_name = self._attr_name cloned._attr_name_private = self._attr_name_private return cloned def __eq__(self, other): """Check Parameter equality. Only equal to other Parameter objects. Returns ------- bool or NotImplemented `True` or `False` if ``other`` is a Parameter, `NotImplemented` otherwise. The latter allows `other` to be checked for equality with ``other.__eq__``. Examples -------- >>> p1, p2 = Parameter(unit="km"), Parameter(unit="km") >>> p1 == p2 True >>> p3 = Parameter(unit="km / s") >>> p3 == p1 False >>> p1 != 2 True """ if not isinstance(other, Parameter): return NotImplemented # Check equality on all `_init_arguments` & `name`. # Need to compare the processed arguments because the inputs are many- # to-one, e.g. `fvalidate` can be a string or the equivalent function. return (self._get_init_arguments(True) == other._get_init_arguments(True)) and ( self.name == other.name ) def __repr__(self): """String representation. ``eval(repr())`` should work, depending if contents like ``fvalidate`` can be similarly round-tripped. """ return "Parameter({})".format( ", ".join(f"{k}={v!r}" for k, v in self._get_init_arguments().items()) ) # =================================================================== # Built-in validators @Parameter.register_validator("default") def _validate_with_unit(cosmology, param, value): """ Default Parameter value validator. Adds/converts units if Parameter has a unit.
""" if param.unit is not None: with u.add_enabled_equivalencies(param.equivalencies): value = u.Quantity(value, param.unit) return value @Parameter.register_validator("float") def _validate_to_float(cosmology, param, value): """Parameter value validator with units, and converted to float.""" value = _validate_with_unit(cosmology, param, value) return float(value) @Parameter.register_validator("scalar") def _validate_to_scalar(cosmology, param, value): """""" value = _validate_with_unit(cosmology, param, value) if not value.isscalar: raise ValueError(f"{param.name} is a non-scalar quantity") return value @Parameter.register_validator("non-negative") def _validate_non_negative(cosmology, param, value): """Parameter value validator where value is a positive float.""" value = _validate_to_float(cosmology, param, value) if value < 0.0: raise ValueError(f"{param.name} cannot be negative.") return value
790452b8ebef97c19c836b27903e96157ded873ab48ed303d6d249fdefc8d09a
# Licensed under a 3-clause BSD style license - see LICENSE.rst import functools from math import inf from numbers import Number import numpy as np from astropy.units import Quantity from astropy.utils import isiterable from astropy.utils.decorators import deprecated from . import units as cu __all__ = [] # nothing is publicly scoped __doctest_skip__ = ["inf_like", "vectorize_if_needed"] def vectorize_redshift_method(func=None, nin=1): """Vectorize a method of redshift(s). Parameters ---------- func : callable or None method to wrap. If `None` returns a :func:`functools.partial` with ``nin`` loaded. nin : int Number of positional redshift arguments. Returns ------- wrapper : callable :func:`functools.wraps` of ``func`` where the first ``nin`` arguments are converted from |Quantity| to :class:`numpy.ndarray`. """ # allow for pie-syntax & setting nin if func is None: return functools.partial(vectorize_redshift_method, nin=nin) @functools.wraps(func) def wrapper(self, *args, **kwargs): """ :func:`functools.wraps` of ``func`` where the first ``nin`` arguments are converted from |Quantity| to `numpy.ndarray` or scalar. """ # process inputs # TODO! quantity-aware vectorization can simplify this. zs = [ z if not isinstance(z, Quantity) else z.to_value(cu.redshift) for z in args[:nin] ] # scalar inputs if all(isinstance(z, (Number, np.generic)) for z in zs): return func(self, *zs, *args[nin:], **kwargs) # non-scalar. use vectorized func return wrapper.__vectorized__(self, *zs, *args[nin:], **kwargs) wrapper.__vectorized__ = np.vectorize(func) # attach vectorized function # TODO! use frompyfunc when can solve return type errors return wrapper @deprecated( since="5.0", message=( "vectorize_if_needed has been removed because it constructs a new ufunc on each" " call" ), alternative="use a pre-vectorized function instead for a target array 'z'", ) def vectorize_if_needed(f, *x, **vkw): """Helper function to vectorize scalar functions on array inputs. Parameters ---------- f : callable 'f' must accept positional arguments and no mandatory keyword arguments. *x Arguments into ``f``. **vkw Keyword arguments into :class:`numpy.vectorize`. Examples -------- >>> func = lambda x: x ** 2 >>> vectorize_if_needed(func, 2) 4 >>> vectorize_if_needed(func, [2, 3]) array([4, 9]) """ return np.vectorize(f, **vkw)(*x) if any(map(isiterable, x)) else f(*x) @deprecated( since="5.0", message=( "inf_like has been removed because it duplicates " "functionality provided by numpy.full_like()" ), alternative="Use numpy.full_like(z, numpy.inf) instead for a target array 'z'", ) def inf_like(x): """Return an infinity-valued scalar or array matching the shape of ``x``, with dtype float. Preserves 'shape' for both array and scalar inputs, but always returns float values, even if ``x`` is of integer type. Parameters ---------- x : scalar or array-like Must work with functions `numpy.isscalar` and `numpy.full_like` (if `x` is not a scalar) Returns ------- `math.inf` or ndarray[float] thereof Returns a scalar `~math.inf` if `x` is a scalar, an array of floats otherwise. Examples -------- >>> inf_like(0.) # float scalar inf >>> inf_like(1) # integer scalar should give float output inf >>> inf_like([0., 1., 2., 3.]) # float list array([inf, inf, inf, inf]) >>> inf_like([0, 1, 2, 3]) # integer list should give float output array([inf, inf, inf, inf]) """ return inf if np.isscalar(x) else np.full_like(x, inf, dtype=float) def aszarr(z): """ Redshift as a `~numbers.Number` or `~numpy.ndarray` / |Quantity| / |Column|.
Allows for any ndarray ducktype by checking for attribute "shape". """ if isinstance(z, (Number, np.generic)): # scalars return z elif hasattr(z, "shape"): # ducktypes NumPy array if hasattr(z, "unit"): # Quantity or Column return (z << cu.redshift).value # for speed only use enabled equivs return z # not one of the preferred types: Number / array ducktype return Quantity(z, cu.redshift).value
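# Usage sketch for ``aszarr`` above; this module is private (``__all__`` is
# empty), so direct use is for illustration only.
def _aszarr_example():
    assert aszarr(1.5) == 1.5  # plain scalars pass through
    assert aszarr(np.array([0.5, 1.0])).shape == (2,)  # ndarrays pass through
    assert aszarr([0.5, 1.0] * cu.redshift).shape == (2,)  # Quantity -> values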
63ef09f9e2289758632c9eafca439b587585fa364fb64b5864177a67e80a2c2a
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Cosmological units and equivalencies. """ # (newline needed for unit summary) import astropy.units as u from astropy.units.utils import generate_unit_summary as _generate_unit_summary __all__ = [ "littleh", "redshift", # redshift equivalencies "dimensionless_redshift", "with_redshift", "redshift_distance", "redshift_hubble", "redshift_temperature", # other equivalencies "with_H0", ] __doctest_requires__ = {("with_redshift", "redshift_distance"): ["scipy"]} _ns = globals() ############################################################################### # Cosmological Units # This is not formally a unit, but is used in that way in many contexts, and # an appropriate equivalency is only possible if it's treated as a unit. redshift = u.def_unit( ["redshift"], prefixes=False, namespace=_ns, doc="Cosmological redshift.", format={"latex": r""}, ) u.def_physical_type(redshift, "redshift") # This is not formally a unit, but is used in that way in many contexts, and # an appropriate equivalency is only possible if it's treated as a unit (see # https://arxiv.org/pdf/1308.4150.pdf for more) # Also note that h or h100 or h_100 would be a better name, but they either # conflict or have numbers in them, which is disallowed littleh = u.def_unit( ["littleh"], namespace=_ns, prefixes=False, doc='Reduced/"dimensionless" Hubble constant', format={"latex": r"h_{100}"}, ) ############################################################################### # Equivalencies def dimensionless_redshift(): """Allow redshift to be 1-to-1 equivalent to dimensionless. It is special compared to other equivalency pairs in that it allows this independent of the power to which the redshift is raised, and independent of whether it is part of a more complicated unit. It is similar to u.dimensionless_angles() in this respect. """ return u.Equivalency([(redshift, None)], "dimensionless_redshift") def redshift_distance(cosmology=None, kind="comoving", **atzkw): """Convert quantities between redshift and distance. Care should be taken to not misinterpret a relativistic, gravitational, etc redshift as a cosmological one. Parameters ---------- cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional A cosmology realization or built-in cosmology's name (e.g. 'Planck18'). If None, will use the default cosmology (controlled by :class:`~astropy.cosmology.default_cosmology`). kind : {'comoving', 'lookback', 'luminosity'} or None, optional The distance type for the Equivalency. Note this does NOT include the angular diameter distance as this distance measure is not monotonic. **atzkw keyword arguments for :func:`~astropy.cosmology.z_at_value` Returns ------- `~astropy.units.equivalencies.Equivalency` Equivalency between redshift and distance.
Examples -------- >>> import astropy.units as u >>> import astropy.cosmology.units as cu >>> from astropy.cosmology import WMAP9 >>> z = 1100 * cu.redshift >>> z.to(u.Mpc, cu.redshift_distance(WMAP9, kind="comoving")) # doctest: +FLOAT_CMP <Quantity 14004.03157418 Mpc> """ from astropy.cosmology import default_cosmology, z_at_value # get cosmology: None -> default and process str / class cosmology = cosmology if cosmology is not None else default_cosmology.get() with default_cosmology.set(cosmology): # if already cosmo, passes through cosmology = default_cosmology.get() allowed_kinds = ("comoving", "lookback", "luminosity") if kind not in allowed_kinds: raise ValueError(f"`kind` is not one of {allowed_kinds}") method = getattr(cosmology, kind + "_distance") def z_to_distance(z): """Redshift to distance.""" return method(z) def distance_to_z(d): """Distance to redshift.""" return z_at_value(method, d << u.Mpc, **atzkw) return u.Equivalency( [(redshift, u.Mpc, z_to_distance, distance_to_z)], "redshift_distance", {"cosmology": cosmology, "distance": kind}, ) def redshift_hubble(cosmology=None, **atzkw): """Convert quantities between redshift and Hubble parameter and little-h. Care should be taken to not misinterpret a relativistic, gravitational, etc redshift as a cosmological one. Parameters ---------- cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional A cosmology realization or built-in cosmology's name (e.g. 'Planck18'). If None, will use the default cosmology (controlled by :class:`~astropy.cosmology.default_cosmology`). **atzkw keyword arguments for :func:`~astropy.cosmology.z_at_value` Returns ------- `~astropy.units.equivalencies.Equivalency` Equivalency between redshift and Hubble parameter and little-h unit. Examples -------- >>> import astropy.units as u >>> import astropy.cosmology.units as cu >>> from astropy.cosmology import WMAP9 >>> z = 1100 * cu.redshift >>> equivalency = cu.redshift_hubble(WMAP9) # construct equivalency >>> z.to(u.km / u.s / u.Mpc, equivalency) # doctest: +FLOAT_CMP <Quantity 1565637.40154275 km / (Mpc s)> >>> z.to(cu.littleh, equivalency) # doctest: +FLOAT_CMP <Quantity 15656.37401543 littleh> """ from astropy.cosmology import default_cosmology, z_at_value # get cosmology: None -> default and process str / class cosmology = cosmology if cosmology is not None else default_cosmology.get() with default_cosmology.set(cosmology): # if already cosmo, passes through cosmology = default_cosmology.get() def z_to_hubble(z): """Redshift to Hubble parameter.""" return cosmology.H(z) def hubble_to_z(H): """Hubble parameter to redshift.""" return z_at_value(cosmology.H, H << (u.km / u.s / u.Mpc), **atzkw) def z_to_littleh(z): """Redshift to :math:`h`-unit Quantity.""" return z_to_hubble(z).to_value(u.km / u.s / u.Mpc) / 100 * littleh def littleh_to_z(h): """:math:`h`-unit Quantity to redshift.""" return hubble_to_z(h * 100) return u.Equivalency( [ (redshift, u.km / u.s / u.Mpc, z_to_hubble, hubble_to_z), (redshift, littleh, z_to_littleh, littleh_to_z), ], "redshift_hubble", {"cosmology": cosmology}, ) def redshift_temperature(cosmology=None, **atzkw): """Convert quantities between redshift and CMB temperature. Care should be taken to not misinterpret a relativistic, gravitational, etc redshift as a cosmological one. Parameters ---------- cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional A cosmology realization or built-in cosmology's name (e.g. 'Planck18'). 
If None, will use the default cosmology (controlled by :class:`~astropy.cosmology.default_cosmology`). **atzkw keyword arguments for :func:`~astropy.cosmology.z_at_value` Returns ------- `~astropy.units.equivalencies.Equivalency` Equivalency between redshift and temperature. Examples -------- >>> import astropy.units as u >>> import astropy.cosmology.units as cu >>> from astropy.cosmology import WMAP9 >>> z = 1100 * cu.redshift >>> z.to(u.K, cu.redshift_temperature(WMAP9)) <Quantity 3000.225 K> """ from astropy.cosmology import default_cosmology, z_at_value # get cosmology: None -> default and process str / class cosmology = cosmology if cosmology is not None else default_cosmology.get() with default_cosmology.set(cosmology): # if already cosmo, passes through cosmology = default_cosmology.get() def z_to_Tcmb(z): return cosmology.Tcmb(z) def Tcmb_to_z(T): return z_at_value(cosmology.Tcmb, T << u.K, **atzkw) return u.Equivalency( [(redshift, u.K, z_to_Tcmb, Tcmb_to_z)], "redshift_temperature", {"cosmology": cosmology}, ) def with_redshift( cosmology=None, *, distance="comoving", hubble=True, Tcmb=True, atzkw=None ): """Convert quantities between measures of cosmological distance. Note: by default all equivalencies are on and must be explicitly turned off. Care should be taken to not misinterpret a relativistic, gravitational, etc redshift as a cosmological one. Parameters ---------- cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional A cosmology realization or built-in cosmology's name (e.g. 'Planck18'). If `None`, will use the default cosmology (controlled by :class:`~astropy.cosmology.default_cosmology`). distance : {'comoving', 'lookback', 'luminosity'} or None (optional, keyword-only) The type of distance equivalency to create or `None`. Default is 'comoving'. hubble : bool (optional, keyword-only) Whether to create a Hubble parameter <-> redshift equivalency, using ``Cosmology.H``. Default is `True`. Tcmb : bool (optional, keyword-only) Whether to create a CMB temperature <-> redshift equivalency, using ``Cosmology.Tcmb``. Default is `True`. atzkw : dict or None (optional, keyword-only) keyword arguments for :func:`~astropy.cosmology.z_at_value` Returns ------- `~astropy.units.equivalencies.Equivalency` With equivalencies between redshift and distance / Hubble / temperature. 
Examples -------- >>> import astropy.units as u >>> import astropy.cosmology.units as cu >>> from astropy.cosmology import WMAP9 >>> equivalency = cu.with_redshift(WMAP9) >>> z = 1100 * cu.redshift Redshift to (comoving) distance: >>> z.to(u.Mpc, equivalency) # doctest: +FLOAT_CMP <Quantity 14004.03157418 Mpc> Redshift to the Hubble parameter: >>> z.to(u.km / u.s / u.Mpc, equivalency) # doctest: +FLOAT_CMP <Quantity 1565637.40154275 km / (Mpc s)> >>> z.to(cu.littleh, equivalency) # doctest: +FLOAT_CMP <Quantity 15656.37401543 littleh> Redshift to CMB temperature: >>> z.to(u.K, equivalency) <Quantity 3000.225 K> """ from astropy.cosmology import default_cosmology # get cosmology: None -> default and process str / class cosmology = cosmology if cosmology is not None else default_cosmology.get() with default_cosmology.set(cosmology): # if already cosmo, passes through cosmology = default_cosmology.get() atzkw = atzkw if atzkw is not None else {} equivs = [] # will append as built # Hubble <-> Redshift if hubble: equivs.extend(redshift_hubble(cosmology, **atzkw)) # CMB Temperature <-> Redshift if Tcmb: equivs.extend(redshift_temperature(cosmology, **atzkw)) # Distance <-> Redshift, but need to choose which distance if distance is not None: equivs.extend(redshift_distance(cosmology, kind=distance, **atzkw)) # ----------- return u.Equivalency( equivs, "with_redshift", {"cosmology": cosmology, "distance": distance, "hubble": hubble, "Tcmb": Tcmb}, ) # =================================================================== def with_H0(H0=None): """ Convert between quantities with little-h and the equivalent physical units. Parameters ---------- H0 : None or `~astropy.units.Quantity` ['frequency'] The value of the Hubble constant to assume. If a `~astropy.units.Quantity`, will assume the quantity *is* ``H0``. If `None` (default), use the ``H0`` attribute from :mod:`~astropy.cosmology.default_cosmology`. References ---------- For an illuminating discussion on why you may or may not want to use little-h at all, see https://arxiv.org/pdf/1308.4150.pdf """ if H0 is None: from .realizations import default_cosmology H0 = default_cosmology.get().H0 h100_val_unit = u.Unit(100 / (H0.to_value((u.km / u.s) / u.Mpc)) * littleh) return u.Equivalency([(h100_val_unit, None)], "with_H0", kwargs={"H0": H0}) # =================================================================== # Enable the set of default equivalencies. # If the cosmology package is imported, this is added to the list astropy-wide. u.add_enabled_equivalencies(dimensionless_redshift()) # ============================================================================= # DOCSTRING # This generates a docstring for this module that describes all of the # standard units defined here. if __doc__ is not None: __doc__ += _generate_unit_summary(_ns)
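# Usage sketch combining the equivalencies above. The inverse directions go
# through ``z_at_value`` and therefore need scipy; WMAP9 is one of the
# built-in realizations.
def _with_redshift_example():
    from astropy.cosmology import WMAP9

    eq = with_redshift(WMAP9)
    z = 2 * redshift
    d = z.to(u.Mpc, eq)  # redshift -> comoving distance
    T = z.to(u.K, eq)  # redshift -> CMB temperature
    z_back = d.to(redshift, eq)  # inverse, via z_at_value
    return d, T, z_back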
a0a10fe8cc5ef7744d51ee275cc108905ee6dbc108b290d221e02401a44b4dc3
# Licensed under a 3-clause BSD style license - see LICENSE.rst # STDLIB import pathlib import sys from typing import Optional, Union # LOCAL from astropy.utils.data import get_pkg_data_path from astropy.utils.decorators import deprecated from astropy.utils.state import ScienceState from .core import Cosmology _COSMOLOGY_DATA_DIR = pathlib.Path( get_pkg_data_path("cosmology", "data", package="astropy") ) available = tuple(sorted(p.stem for p in _COSMOLOGY_DATA_DIR.glob("*.ecsv"))) __all__ = ["available", "default_cosmology"] + list(available) __doctest_requires__ = {"*": ["scipy"]} def __getattr__(name): """Make specific realizations from data files with lazy import from `PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_. Raises ------ AttributeError If "name" is not in :mod:`astropy.cosmology.realizations` """ if name not in available: raise AttributeError(f"module {__name__!r} has no attribute {name!r}.") cosmo = Cosmology.read( str(_COSMOLOGY_DATA_DIR / name) + ".ecsv", format="ascii.ecsv" ) cosmo.__doc__ = ( f"{name} instance of {cosmo.__class__.__qualname__} " f"cosmology\n(from {cosmo.meta['reference']})" ) # Cache in this module so `__getattr__` is only called once per `name`. setattr(sys.modules[__name__], name, cosmo) return cosmo def __dir__(): """Directory, including lazily-imported objects.""" return __all__ ######################################################################### # The science state below contains the current cosmology. ######################################################################### class default_cosmology(ScienceState): """The default cosmology to use. To change it:: >>> from astropy.cosmology import default_cosmology, WMAP7 >>> with default_cosmology.set(WMAP7): ... # WMAP7 cosmology in effect ... pass Or, you may use a string:: >>> with default_cosmology.set('WMAP7'): ... # WMAP7 cosmology in effect ... pass To get the default cosmology: >>> default_cosmology.get() FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, ... """ _default_value = "Planck18" _value = "Planck18" @deprecated("5.0", alternative="get") @classmethod def get_cosmology_from_string(cls, arg): """Return a cosmology instance from a string.""" if arg == "no_default": value = None else: value = cls._get_from_registry(arg) return value @classmethod def validate(cls, value: Union[Cosmology, str, None]) -> Optional[Cosmology]: """Return a Cosmology given a value. Parameters ---------- value : None, str, or `~astropy.cosmology.Cosmology` Returns ------- `~astropy.cosmology.Cosmology` instance Raises ------ TypeError If ``value`` is not a string or |Cosmology|. """ # None -> default if value is None: value = cls._default_value # Parse to Cosmology. Error if cannot. if isinstance(value, str): # special-case one string if value == "no_default": value = None else: value = cls._get_from_registry(value) elif not isinstance(value, Cosmology): raise TypeError( "default_cosmology must be a string or Cosmology instance, " f"not {value}." ) return value @classmethod def _get_from_registry(cls, name: str) -> Cosmology: """Get a registered Cosmology realization. Parameters ---------- name : str The built-in |Cosmology| realization to retrieve. Returns ------- `astropy.cosmology.Cosmology` The cosmology realization of `name`. Raises ------ ValueError If ``name`` is a str, but not for a built-in Cosmology. TypeError If ``name`` is for a non-Cosmology object. """ try: value = getattr(sys.modules[__name__], name) except AttributeError: raise ValueError( f"Unknown cosmology {name!r}. 
Valid cosmologies:\n{available}" ) if not isinstance(value, Cosmology): raise TypeError(f"cannot find a Cosmology realization called {name}.") return value
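# Usage sketch for the science state above: strings are validated through the
# registry, and the context manager restores the previous value on exit.
def _default_cosmology_example():
    assert default_cosmology.get().name == "Planck18"
    with default_cosmology.set("WMAP9"):
        assert default_cosmology.get().name == "WMAP9"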
88384c8d1c0cf0066c2ca2f83d5807bc40ce22a113abe92608b14ef06ea98391
# Licensed under a 3-clause BSD style license - see LICENSE.rst # The BoxLeastSquares periodogram functionality has been moved to # astropy.timeseries.periodograms.bls. The purpose of this file is to provide backward- # compatibility during a transition phase. We can't emit a deprecation warning # simply on import of this module, since the classes are imported into the # top-level astropy.stats, so instead we wrap the main class and emit a # warning during initialization. import warnings from astropy.timeseries.periodograms.bls import ( BoxLeastSquares as TimeseriesBoxLeastSquares, ) from astropy.timeseries.periodograms.bls import ( BoxLeastSquaresResults as TimeseriesBoxLeastSquaresResults, ) from astropy.utils.exceptions import AstropyDeprecationWarning __all__ = ["BoxLeastSquares", "BoxLeastSquaresResults"] class BoxLeastSquares(TimeseriesBoxLeastSquares): """ Compute the box least squares periodogram. This class has been deprecated and will be removed in a future version. Use `astropy.timeseries.BoxLeastSquares` instead. """ def __init__(self, *args, **kwargs): warnings.warn( "Importing BoxLeastSquares from astropy.stats has been " "deprecated and will no longer be supported in future. " "Please import this class from the astropy.timeseries " "module instead", AstropyDeprecationWarning, ) super().__init__(*args, **kwargs) class BoxLeastSquaresResults(TimeseriesBoxLeastSquaresResults): """ The results of a BoxLeastSquares search. This class has been deprecated and will be removed in a future version. Use `astropy.timeseries.BoxLeastSquaresResults` instead. """ def __init__(self, *args, **kwargs): warnings.warn( "Importing BoxLeastSquaresResults from astropy.stats has been " "deprecated and will no longer be supported in future. " "Please import this class from the astropy.timeseries " "module instead", AstropyDeprecationWarning, ) super().__init__(*args, **kwargs)
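# Sketch of the deprecation behaviour implemented above: constructing through
# the old import path emits AstropyDeprecationWarning (the time and flux
# arrays here are arbitrary).
def _deprecation_example():
    import numpy as np

    t = np.linspace(0, 20, 100)
    y = np.ones_like(t)
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        BoxLeastSquares(t, y)
    assert caught[0].category is AstropyDeprecationWarning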
a661da62b0b4aedc40287980e860fb55246b0d3397f7b87a850b4733e674e65d
# Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np import pytest from numpy.testing import assert_allclose, assert_equal from astropy.stats.jackknife import jackknife_resampling, jackknife_stats from astropy.utils.compat.optional_deps import HAS_SCIPY def test_jackknife_resampling(): data = np.array([1, 2, 3, 4]) answer = np.array([[2, 3, 4], [1, 3, 4], [1, 2, 4], [1, 2, 3]]) assert_equal(answer, jackknife_resampling(data)) # test jackknife stats, except confidence interval @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_jackknife_stats(): # Test from the third example of Ref.[3] data = np.array((115, 170, 142, 138, 280, 470, 480, 141, 390)) # true estimate, bias, and std_err answer = (258.4444, 0.0, 50.25936) assert_allclose(answer, jackknife_stats(data, np.mean)[0:3], atol=1e-4) # test jackknife stats, including confidence intervals @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_jackknife_stats_conf_interval(): # Test from the first example of Ref.[3] # fmt: off data = np.array( [ 48, 42, 36, 33, 20, 16, 29, 39, 42, 38, 42, 36, 20, 15, 42, 33, 22, 20, 41, 43, 45, 34, 14, 22, 6, 7, 0, 15, 33, 34, 28, 29, 34, 41, 4, 13, 32, 38, 24, 25, 47, 27, 41, 41, 24, 28, 26, 14, 30, 28, 41, 40 ] ) # fmt: on data = np.reshape(data, (-1, 2)) data = data[:, 1] # true estimate, bias, and std_err answer = (113.7862, -4.376391, 22.26572) # calculate the mle of the variance (biased estimator!) def mle_var(x): return np.sum((x - np.mean(x)) * (x - np.mean(x))) / len(x) assert_allclose(answer, jackknife_stats(data, mle_var, 0.95)[0:3], atol=1e-4) # test confidence interval answer = np.array((70.14615, 157.42616)) assert_allclose(answer, jackknife_stats(data, mle_var, 0.95)[3], atol=1e-4) def test_jackknife_stats_exceptions(): with pytest.raises(ValueError): jackknife_stats(np.arange(2), np.mean, confidence_level=42)
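# Usage sketch of the API exercised above; the confidence interval requires
# scipy and the numbers follow test_jackknife_stats.
def _jackknife_example():
    data = np.array([115, 170, 142, 138, 280, 470, 480, 141, 390])
    estimate, bias, stderr, conf_interval = jackknife_stats(data, np.mean, 0.95)
    assert_allclose(estimate, 258.4444, atol=1e-4)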
f689da31a0d69594f6a8e3a88d385dcd95de35586575ad2c8a7162c1786a06f8
# Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np import pytest from numpy.testing import assert_allclose from astropy.stats import ( calculate_bin_edges, freedman_bin_width, histogram, knuth_bin_width, scott_bin_width, ) from astropy.utils.compat.optional_deps import HAS_SCIPY def test_scott_bin_width(N=10000, rseed=0): rng = np.random.default_rng(rseed) X = rng.standard_normal(N) delta = scott_bin_width(X) assert_allclose(delta, 3.5 * np.std(X) / N ** (1 / 3)) delta, bins = scott_bin_width(X, return_bins=True) assert_allclose(delta, 3.5 * np.std(X) / N ** (1 / 3)) with pytest.raises(ValueError): scott_bin_width(rng.random((2, 10))) def test_freedman_bin_width(N=10000, rseed=0): rng = np.random.default_rng(rseed) X = rng.standard_normal(N) v25, v75 = np.percentile(X, [25, 75]) delta = freedman_bin_width(X) assert_allclose(delta, 2 * (v75 - v25) / N ** (1 / 3)) delta, bins = freedman_bin_width(X, return_bins=True) assert_allclose(delta, 2 * (v75 - v25) / N ** (1 / 3)) with pytest.raises(ValueError): freedman_bin_width(rng.random((2, 10))) # data with too small IQR test_x = [1, 2, 3] + [4] * 100 + [5, 6, 7] with pytest.raises(ValueError, match=r"Please use another bin method"): with pytest.warns(RuntimeWarning, match=r"divide by zero encountered"): freedman_bin_width(test_x, return_bins=True) # data with small IQR but not too small test_x = np.asarray([1, 2, 3] * 100 + [4] + [5, 6, 7], dtype=np.float32) test_x *= 1.5e-6 delta, bins = freedman_bin_width(test_x, return_bins=True) assert_allclose(delta, 8.923325554510689e-07) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_knuth_bin_width(N=10000, rseed=0): rng = np.random.default_rng(rseed) X = rng.standard_normal(N) dx, bins = knuth_bin_width(X, return_bins=True) assert_allclose(len(bins), 58) dx2 = knuth_bin_width(X) assert dx == dx2 with pytest.raises(ValueError): knuth_bin_width(rng.random((2, 10))) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_knuth_histogram(N=1000, rseed=0): rng = np.random.default_rng(rseed) x = rng.standard_normal(N) counts, bins = histogram(x, "knuth") assert counts.sum() == len(x) assert len(counts) == len(bins) - 1 _bin_types_to_test = [30, "scott", "freedman", "blocks"] if HAS_SCIPY: _bin_types_to_test += ["knuth"] @pytest.mark.parametrize("bin_type", _bin_types_to_test + [np.linspace(-5, 5, 31)]) def test_histogram(bin_type, N=1000, rseed=0): rng = np.random.default_rng(rseed) x = rng.standard_normal(N) counts, bins = histogram(x, bin_type) assert counts.sum() == len(x) assert len(counts) == len(bins) - 1 # Don't include a list of bins as a bin_type here because the effect # of range is different in that case @pytest.mark.parametrize("bin_type", _bin_types_to_test) def test_histogram_range(bin_type, N=1000, rseed=0): # Regression test for #8010 rng = np.random.default_rng(rseed) x = rng.standard_normal(N) range = (0.1, 0.8) bins = calculate_bin_edges(x, bin_type, range=range) assert bins.max() == range[1] assert bins.min() == range[0] def test_histogram_range_with_bins_list(N=1000, rseed=0): # The expected result when the input bins is a list is # the same list on output. 
rng = np.random.default_rng(rseed) x = rng.standard_normal(N) range = (0.1, 0.8) input_bins = np.linspace(-5, 5, 31) bins = calculate_bin_edges(x, input_bins, range=range) assert all(bins == input_bins) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_histogram_output_knuth(): rng = np.random.default_rng(0) X = rng.standard_normal(100) counts, bins = histogram(X, bins="knuth") assert_allclose(counts, [2, 1, 13, 19, 15, 18, 14, 10, 8]) # fmt: off assert_allclose(bins, [-2.32503077, -1.84420596, -1.36338114, -0.88255632, -0.4017315, 0.07909331, 0.55991813, 1.04074295, 1.52156777, 2.00239258]) # fmt: on def test_histogram_output(): rng = np.random.default_rng(0) X = rng.standard_normal(100) counts, bins = histogram(X, bins=10) assert_allclose(counts, [2, 0, 12, 14, 14, 17, 16, 8, 9, 8]) # fmt: off assert_allclose(bins, [-2.32503077, -1.89228844, -1.4595461, -1.02680377, -0.59406143, -0.1613191, 0.27142324, 0.70416558, 1.13690791, 1.56965025, 2.00239258]) # fmt: on counts, bins = histogram(X, bins="scott") assert_allclose(counts, [2, 14, 27, 25, 16, 16]) # fmt: off assert_allclose(bins, [-2.32503077, -1.59953424, -0.87403771, -0.14854117, 0.57695536, 1.3024519, 2.02794843]) # fmt: on counts, bins = histogram(X, bins="freedman") assert_allclose(counts, [2, 11, 16, 18, 22, 14, 13, 4]) # fmt: off assert_allclose(bins, [-2.32503077, -1.74087192, -1.15671306, -0.5725542, 0.01160465, 0.59576351, 1.17992237, 1.76408122, 2.34824008], rtol=2e-7) # fmt: on counts, bins = histogram(X, bins="blocks") assert_allclose(counts, [3, 97]) assert_allclose(bins, [-2.32503077, -1.37136996, 2.00239258]) def test_histogram_badargs(N=1000, rseed=0): rng = np.random.default_rng(rseed) x = rng.standard_normal(N) # weights is not supported for bins in ["scott", "freedman", "blocks"]: with pytest.raises(NotImplementedError): histogram(x, bins, weights=x) # bad bins arg gives ValueError with pytest.raises(ValueError): histogram(x, bins="bad_argument")
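# Usage sketch of the binning API exercised above: ``histogram`` mirrors
# ``numpy.histogram`` but also accepts bin-rule names, and
# ``calculate_bin_edges`` exposes just the edge computation.
def _histogram_example():
    rng = np.random.default_rng(0)
    x = rng.standard_normal(1000)
    counts, edges = histogram(x, bins="freedman")
    assert counts.sum() == len(x)
    edges_only = calculate_bin_edges(x, "scott", range=(-3, 3))
    assert edges_only.min() == -3 and edges_only.max() == 3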
3389bf80b51d022c4a1c11fbed87fbeea34e733bbca32985b6bc547b6e063f7e
# Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np import pytest from numpy.testing import assert_allclose, assert_equal from astropy import units as u from astropy.stats import mad_std from astropy.stats.sigma_clipping import SigmaClip, sigma_clip, sigma_clipped_stats from astropy.utils.compat.optional_deps import HAS_SCIPY from astropy.utils.exceptions import AstropyUserWarning from astropy.utils.misc import NumpyRNGContext def test_sigma_clip(): # need to seed the numpy RNG to make sure we don't get some # amazingly flukey random number that breaks one of the tests with NumpyRNGContext(12345): # Amazing, I've got the same combination on my luggage! randvar = np.random.randn(10000) filtered_data = sigma_clip(randvar, sigma=1, maxiters=2) assert sum(filtered_data.mask) > 0 assert sum(~filtered_data.mask) < randvar.size # this is actually a silly thing to do, because it uses the # standard deviation as the variance, but it tests to make sure # these arguments are actually doing something filtered_data2 = sigma_clip(randvar, sigma=1, maxiters=2, stdfunc=np.var) assert not np.all(filtered_data.mask == filtered_data2.mask) filtered_data3 = sigma_clip(randvar, sigma=1, maxiters=2, cenfunc=np.mean) assert not np.all(filtered_data.mask == filtered_data3.mask) # make sure the maxiters=None method works at all. filtered_data = sigma_clip(randvar, sigma=3, maxiters=None) # test copying assert filtered_data.data[0] == randvar[0] filtered_data.data[0] += 1.0 assert filtered_data.data[0] != randvar[0] filtered_data = sigma_clip(randvar, sigma=3, maxiters=None, copy=False) assert filtered_data.data[0] == randvar[0] filtered_data.data[0] += 1.0 assert filtered_data.data[0] == randvar[0] # test axis data = np.arange(5) + np.random.normal(0.0, 0.05, (5, 5)) + np.diag(np.ones(5)) filtered_data = sigma_clip(data, axis=0, sigma=2.3) assert filtered_data.count() == 20 filtered_data = sigma_clip(data, axis=1, sigma=2.3) assert filtered_data.count() == 25 @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_axis_none(): """ For masked=False and axis=None, masked elements should be removed from the result. 
""" data = np.arange(10.0) data[0] = 100 result = sigma_clip(data, masked=False, axis=None) assert_equal(result, data[1:]) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_compare_to_scipy_sigmaclip(): from scipy import stats # need to seed the numpy RNG to make sure we don't get some # amazingly flukey random number that breaks one of the tests with NumpyRNGContext(12345): randvar = np.random.randn(10000) astropyres = sigma_clip(randvar, sigma=3, maxiters=None, cenfunc=np.mean) scipyres = stats.sigmaclip(randvar, 3, 3)[0] assert astropyres.count() == len(scipyres) assert_equal(astropyres[~astropyres.mask].data, scipyres) def test_sigma_clip_scalar_mask(): """Test that the returned mask is not a scalar.""" data = np.arange(5) result = sigma_clip(data, sigma=100.0, maxiters=1) assert result.mask.shape != () def test_sigma_clip_class(): with NumpyRNGContext(12345): data = np.random.randn(100) data[10] = 1.0e5 sobj = SigmaClip(sigma=1, maxiters=2) sfunc = sigma_clip(data, sigma=1, maxiters=2) assert_equal(sobj(data), sfunc) def test_sigma_clip_mean(): with NumpyRNGContext(12345): data = np.random.normal(0.0, 0.05, (10, 10)) data[2, 2] = 1.0e5 sobj1 = SigmaClip(sigma=1, maxiters=2, cenfunc="mean") sobj2 = SigmaClip(sigma=1, maxiters=2, cenfunc=np.nanmean) assert_equal(sobj1(data), sobj2(data)) assert_equal(sobj1(data, axis=0), sobj2(data, axis=0)) def test_sigma_clip_invalid_cenfunc_stdfunc(): with pytest.raises(ValueError): SigmaClip(cenfunc="invalid") with pytest.raises(ValueError): SigmaClip(stdfunc="invalid") def test_sigma_clipped_stats(): """Test list data with input mask or mask_value (#3268).""" # test list data with mask data = [0, 1] mask = np.array([True, False]) result = sigma_clipped_stats(data, mask=mask) # Check that the result of np.ma.median was converted to a scalar assert isinstance(result[1], float) assert result == (1.0, 1.0, 0.0) result2 = sigma_clipped_stats(data, mask=mask, axis=0) assert_equal(result, result2) # test list data with mask_value result = sigma_clipped_stats(data, mask_value=0.0) assert isinstance(result[1], float) assert result == (1.0, 1.0, 0.0) # test without mask data = [0, 2] result = sigma_clipped_stats(data) assert isinstance(result[1], float) assert result == (1.0, 1.0, 1.0) _data = np.arange(10) data = np.ma.MaskedArray([_data, _data, 10 * _data]) mean = sigma_clip(data, axis=0, sigma=1).mean(axis=0) assert_equal(mean, _data) mean, median, stddev = sigma_clipped_stats(data, axis=0, sigma=1) assert_equal(mean, _data) assert_equal(median, _data) assert_equal(stddev, np.zeros_like(_data)) def test_sigma_clipped_stats_ddof(): with NumpyRNGContext(12345): data = np.random.randn(10000) data[10] = 1.0e5 mean1, median1, stddev1 = sigma_clipped_stats(data) mean2, median2, stddev2 = sigma_clipped_stats(data, std_ddof=1) assert mean1 == mean2 assert median1 == median2 assert_allclose(stddev1, 0.98156805711673156) assert_allclose(stddev2, 0.98161731654802831) def test_invalid_sigma_clip(): """Test sigma_clip of data containing invalid values.""" data = np.ones((5, 5)) data[2, 2] = 1000 data[3, 4] = np.nan data[1, 1] = np.inf data_ma = np.ma.MaskedArray(data) with pytest.warns(AstropyUserWarning, match=r"Input data contains invalid values"): result = sigma_clip(data) with pytest.warns(AstropyUserWarning, match=r"Input data contains invalid values"): result_ma = sigma_clip(data_ma) assert_equal(result.data, result_ma.data) assert_equal(result.mask, result_ma.mask) # Pre #4051 if data contains any NaN or infs sigma_clip returns the # mask 
containing `False` only or TypeError if data also contains a # masked value. assert result.mask[2, 2] assert result.mask[3, 4] assert result.mask[1, 1] with pytest.warns(AstropyUserWarning, match=r"Input data contains invalid values"): result2 = sigma_clip(data, axis=0) assert result2.mask[1, 1] assert result2.mask[3, 4] with pytest.warns(AstropyUserWarning, match=r"Input data contains invalid values"): result3 = sigma_clip(data, axis=0, copy=False) assert result3.mask[1, 1] assert result3.mask[3, 4] # stats along axis with all nans data[0, :] = np.nan # row of all nans with pytest.warns(AstropyUserWarning, match=r"Input data contains invalid values"): _, minarr, maxarr = sigma_clip(data, axis=1, masked=False, return_bounds=True) assert np.isnan(minarr[0]) assert np.isnan(maxarr[0]) def test_sigmaclip_negative_axis(): """Test that dimensions are expanded correctly even if axis is negative.""" data = np.ones((3, 4)) # without correct expand_dims this would raise a ValueError sigma_clip(data, axis=-1) def test_sigmaclip_fully_masked(): """ Make sure a fully masked array is returned when sigma clipping a fully masked array. """ data = np.ma.MaskedArray( data=[[1.0, 0.0], [0.0, 1.0]], mask=[[True, True], [True, True]] ) clipped_data = sigma_clip(data) assert np.ma.allequal(data, clipped_data) clipped_data = sigma_clip(data, masked=False) assert not isinstance(clipped_data, np.ma.MaskedArray) assert np.all(np.isnan(clipped_data)) clipped_data, low, high = sigma_clip(data, return_bounds=True) assert np.ma.allequal(data, clipped_data) assert np.isnan(low) assert np.isnan(high) def test_sigmaclip_empty_masked(): """ Make sure an empty masked array is returned when sigma clipping an empty masked array. """ data = np.ma.MaskedArray(data=[], mask=[]) clipped_data = sigma_clip(data) assert np.ma.allequal(data, clipped_data) clipped_data, low, high = sigma_clip(data, return_bounds=True) assert np.ma.allequal(data, clipped_data) assert np.isnan(low) assert np.isnan(high) def test_sigmaclip_empty(): """ Make sure an empty array is returned when sigma clipping an empty array. 
""" data = np.array([]) clipped_data = sigma_clip(data) assert isinstance(clipped_data, np.ma.MaskedArray) assert_equal(data, clipped_data.data) clipped_data, low, high = sigma_clip(data, return_bounds=True) assert_equal(data, clipped_data) assert np.isnan(low) assert np.isnan(high) def test_sigma_clip_axis_tuple_3D(): """Test sigma clipping over a subset of axes (issue #7227).""" data = np.sin(0.78 * np.arange(27)).reshape(3, 3, 3) mask = np.zeros_like(data, dtype=np.bool_) data_t = np.rollaxis(data, 1, 0) mask_t = np.rollaxis(mask, 1, 0) # Loop over what was originally axis 1 and clip each plane directly: for data_plane, mask_plane in zip(data_t, mask_t): mean = data_plane.mean() maxdev = 1.5 * data_plane.std() mask_plane[:] = np.logical_or( data_plane < mean - maxdev, data_plane > mean + maxdev ) # Do the equivalent thing using sigma_clip: result = sigma_clip(data, sigma=1.5, cenfunc=np.mean, maxiters=1, axis=(0, -1)) assert_equal(result.mask, mask) def test_sigmaclip_repr(): sigclip = SigmaClip() sigclip_repr = ( "SigmaClip(sigma=3.0, sigma_lower=3.0, sigma_upper=3.0," " maxiters=5, cenfunc='median', stdfunc='std', " "grow=False)" ) sigclip_str = ( "<SigmaClip>\n sigma: 3.0\n sigma_lower: 3.0\n" " sigma_upper: 3.0\n maxiters: 5\n" " cenfunc: 'median'\n stdfunc: 'std'\n" " grow: False" ) assert repr(sigclip) == sigclip_repr assert str(sigclip) == sigclip_str def test_sigma_clippped_stats_unit(): data = np.array([1, 1]) * u.kpc result = sigma_clipped_stats(data) assert result == (1.0 * u.kpc, 1.0 * u.kpc, 0.0 * u.kpc) def test_sigma_clippped_stats_all_masked(): """ Test sigma_clipped_stats when the input array is completely masked. """ arr = np.ma.MaskedArray(np.arange(10), mask=True) result = sigma_clipped_stats(arr) assert result == (np.ma.masked, np.ma.masked, np.ma.masked) arr = np.ma.MaskedArray(np.zeros(10), mask=False) result = sigma_clipped_stats(arr, mask_value=0.0) assert result == (np.ma.masked, np.ma.masked, np.ma.masked) arr = np.ma.MaskedArray(np.arange(10), mask=False) mask = arr < 20 result = sigma_clipped_stats(arr, mask=mask) assert result == (np.ma.masked, np.ma.masked, np.ma.masked) def test_sigma_clip_masked_data_values(): """ Test that the data values & type returned by sigma_clip are the same as its input when using masked=True (rather than being upcast to float64 & containing NaNs as in issue #10605) and also that the input data get copied or referenced as appropriate. """ data = np.array([-2, 5, -5, -6, 20, 14, 1]) result = sigma_clip(data, sigma=1.5, maxiters=3, axis=None, masked=True, copy=True) assert result.dtype == data.dtype assert_equal(result.data, data) assert not np.shares_memory(result.data, data) result = sigma_clip(data, sigma=1.5, maxiters=3, axis=None, masked=True, copy=False) assert result.dtype == data.dtype assert_equal(result.data, data) assert np.shares_memory(result.data, data) # (The fact that the arrays share memory probably also means they're the # same, but doesn't strictly prove it, eg. one could be reversed.) 
result = sigma_clip(data, sigma=1.5, maxiters=3, axis=0, masked=True, copy=True) assert result.dtype == data.dtype assert_equal(result.data, data) assert not np.shares_memory(result.data, data) result = sigma_clip(data, sigma=1.5, maxiters=3, axis=0, masked=True, copy=False) assert result.dtype == data.dtype assert_equal(result.data, data) assert np.shares_memory(result.data, data) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_sigma_clip_grow(): """ Test sigma_clip with growth of masking to include the neighbours within a specified radius of deviant values. """ # We could use a random seed here, but enumerating the data guarantees that # we test sigma_clip itself and not random number generation. # fmt: off data = np.array( [ -0.2 , 0.48, -0.52, -0.56, 1.97, 1.39, 0.09, 0.28, 0.77, 1.25, 1.01, -1.3 , 0.27, 0.23, 1.35, 0.89, -2. , -0.37, 1.67, -0.44, -0.54, 0.48, 3.25, -1.02, -0.58, 0.12, 0.3 , 0.52, 0. , 1.34, -0.71, -0.83, -2.37, -1.86, -0.86, 0.56, -1.27, 0.12, -1.06, 0.33, -2.36, -0.2 , -1.54, -0.97, -1.31, 0.29, 0.38, -0.75, 0.33, 1.35, 0.07, 0.25, -0.01, 1. , 1.33, -0.92, -1.55, 0.02, 0.76, -0.66, 0.86, -0.01, 0.05, 0.67, 0.85, -0.96, -0.02, -2.3 , -0.65, -1.22, -1.33, 1.07, 0.72, 0.69, 1. , -0.5 , -0.62, -0.92, -0.73, 0.22, 0.05, -1.16, 0.82, 0.43, 1.01, 1.82, -1. , 0.85, -0.13, 0.91, 0.19, 2.17, -0.11, 2. , 0.03, 0.8 , 0.12, -0.75, 0.58, 0.15, ] ) # fmt: on # Test growth to immediate neighbours in simple 1D case: filtered_data = sigma_clip(data, sigma=2, maxiters=3, grow=1) # Indices of the 26/100 points expected to be masked: # fmt: off expected = np.array( [ 3, 4, 5, 15, 16, 17, 21, 22, 23, 31, 32, 33, 39, 40, 41, 66, 67, 68, 84, 85, 86, 90, 91, 92, 93, 94, ] ) # fmt: on assert np.array_equal(np.where(filtered_data.mask)[0], expected) # Test block growth in 2 of 3 dimensions (as in a 2D model set): data = data.reshape(4, 5, 5) filtered_data = sigma_clip(data, sigma=2.1, maxiters=1, grow=1.5, axis=(1, 2)) # fmt: off expected = np.array( [ [ 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, ], [ 3, 3, 3, 4, 4, 4, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 4, 2, 2, 2, 3, 3, 3, 4, 4, 4, 2, 2, 2, 3, 3, 3, 4, 4, 4, ], [ 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 0, 1, 2, 3, 0, 1, 0, 1, 1, 2, 3, 1, 2, 3, 1, 2, 3, 0, 1, 2, 0, 1, 2, 0, 1, 2, ], ] ) # fmt: on assert np.array_equal(np.where(filtered_data.mask), expected) # Test ~spherical growth (of a single very-deviant point) in 3D data: data[1, 2, 2] = 100.0 filtered_data = sigma_clip(data, sigma=3.0, maxiters=1, grow=2.0) # fmt: off expected = np.array( [ [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3 ], [ 1, 1, 1, 2, 2, 2, 3, 3, 3, 0, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 4, 1, 1, 1, 2, 2, 2, 3, 3, 3, 2 ], [ 1, 2, 3, 1, 2, 3, 1, 2, 3, 2, 1, 2, 3, 0, 1, 2, 3, 4, 1, 2, 3, 2, 1, 2, 3, 1, 2, 3, 1, 2, 3, 2 ], ] ) # fmt: on assert np.array_equal(np.where(filtered_data.mask), expected) @pytest.mark.parametrize( ("axis", "bounds_shape"), [ (0, (4, 5, 6, 7)), (1, (3, 5, 6, 7)), (-1, (3, 4, 5, 6)), ((1, 3), (3, 5, 7)), ((3, 1), (3, 5, 7)), ((1, 2, 4), (3, 6)), ], ) def test_sigma_clip_axis_shapes(axis, bounds_shape): # Check the shapes of the output for different use cases with NumpyRNGContext(12345): array = np.random.random((3, 4, 5, 6, 7)) result1 = sigma_clip(array, axis=axis) assert result1.shape == array.shape result2, bound1, bound2 = sigma_clip(array, axis=axis, return_bounds=True) assert result2.shape == array.shape 
assert bound1.shape == bounds_shape assert bound2.shape == bounds_shape @pytest.mark.parametrize( "dtype", [">f2", "<f2", ">f4", "<f4", ">f8", "<f8", "<i4", ">i8"] ) def test_sigma_clip_dtypes(dtype): # Check that the results are consistent across dtypes and byte orders with NumpyRNGContext(12345): array = np.random.randint(-5, 5, 1000).astype(float) array[30] = 100 reference = sigma_clip(array, copy=True, masked=False) actual = sigma_clip(array.astype(dtype), copy=True, masked=False) assert_equal(reference, actual) def test_mad_std(): # Check with a small array where we know how the result should differ from std # Choose an array with few elements and a high proportion of outliers since # in this case std and mad_std will be very different. array = np.array([1, 10000, 4, 3, 10000]) # First check with regular std, which shouldn't remove any values result_std = sigma_clip( array, cenfunc="median", stdfunc="std", maxiters=1, sigma=5, masked=False ) assert_equal(result_std, array) # Whereas using mad_std should result in the high values being removed result_mad_std = sigma_clip( array, cenfunc="median", stdfunc="mad_std", maxiters=1, sigma=5, masked=False ) assert_equal(result_mad_std, [1, 4, 3]) # We now check this again but with the axis= keyword set since at the time # of writing this test this relies on a fast C implementation in which we # have re-implemented mad_std. result_std = sigma_clip( array, cenfunc="median", stdfunc="std", maxiters=1, sigma=5, masked=False, axis=0, ) assert_equal(result_std, array) result_mad_std = sigma_clip( array, cenfunc="median", stdfunc="mad_std", maxiters=1, sigma=5, masked=False, axis=0, ) assert_equal(result_mad_std, [1, np.nan, 4, 3, np.nan]) def test_mad_std_large(): # And now test with a larger array and compare with Python mad_std function with NumpyRNGContext(12345): array = np.random.uniform(-1, 2, (30, 40)) def nan_mad_std(data, axis=None): return mad_std(data, axis=axis, ignore_nan=True) result1 = sigma_clip( array, sigma=2, maxiters=None, stdfunc=nan_mad_std, axis=0, masked=False ) result2 = sigma_clip( array, sigma=2, maxiters=None, stdfunc="mad_std", axis=0, masked=False ) assert_allclose(result1, result2)
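# Usage sketch of the two entry points exercised above: the functional form
# and the equivalent reusable callable.
def _sigma_clip_example():
    data = np.array([1.0, 2.0, 3.0, 2.0, 1.0, 100.0])
    clipped = sigma_clip(data, sigma=2, maxiters=2)  # masked array out
    assert clipped.mask[-1]  # only the outlier is rejected
    assert_equal(SigmaClip(sigma=2, maxiters=2)(data), clipped)
    mean, median, stddev = sigma_clipped_stats(data, sigma=2)
    assert median == 2.0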
from numpy.testing import assert_allclose from astropy.stats.info_theory import ( akaike_info_criterion, akaike_info_criterion_lsq, bayesian_info_criterion, bayesian_info_criterion_lsq, ) def test_bayesian_info_criterion(): # This test is from an example presented in Ref [1] lnL = (-176.4, -173.0) n_params = (2, 3) n_samples = 100 answer = 2.195 bic_g = bayesian_info_criterion(lnL[0], n_params[0], n_samples) bic_t = bayesian_info_criterion(lnL[1], n_params[1], n_samples) assert_allclose(answer, bic_g - bic_t, atol=1e-1) def test_akaike_info_criterion(): # This test is from an example presented in Ref [2] n_samples = 121 lnL = (-3.54, -4.17) n_params = (6, 5) answer = 0.95 aic_1 = akaike_info_criterion(lnL[0], n_params[0], n_samples) aic_2 = akaike_info_criterion(lnL[1], n_params[1], n_samples) assert_allclose(answer, aic_1 - aic_2, atol=1e-2) def test_akaike_info_criterion_lsq(): # This test is from an example presented in Ref [1] n_samples = 100 n_params = (4, 3, 3) ssr = (25.0, 26.0, 27.0) answer = (-130.21, -128.46, -124.68) assert_allclose( answer[0], akaike_info_criterion_lsq(ssr[0], n_params[0], n_samples), atol=1e-2 ) assert_allclose( answer[1], akaike_info_criterion_lsq(ssr[1], n_params[1], n_samples), atol=1e-2 ) assert_allclose( answer[2], akaike_info_criterion_lsq(ssr[2], n_params[2], n_samples), atol=1e-2 ) def test_bayesian_info_criterion_lsq(): """This test is from: http://www.statoek.wiso.uni-goettingen.de/veranstaltungen/non_semi_models/ AkaikeLsg.pdf Note that in there, they compute a "normalized BIC". Therefore, the answers presented here are recalculated versions based on their values. """ n_samples = 25 n_params = (1, 2, 1) ssr = (48959, 32512, 37980) answer = (192.706, 185.706, 186.360) assert_allclose( answer[0], bayesian_info_criterion_lsq(ssr[0], n_params[0], n_samples), atol=1e-2, ) assert_allclose( answer[1], bayesian_info_criterion_lsq(ssr[1], n_params[1], n_samples), atol=1e-2, ) assert_allclose( answer[2], bayesian_info_criterion_lsq(ssr[2], n_params[2], n_samples), atol=1e-2, )
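def _sketch_bic_aicc_formulas():
    # Not a test: a hand-rolled cross-check of the formulas these tests appear
    # to assume (an assumption stated here, not quoted from the astropy docs):
    # BIC = k*ln(n) - 2*lnL and the small-sample corrected
    # AICc = 2*k - 2*lnL + 2*k*(k + 1)/(n - k - 1).
    from math import log

    def bic(ln_l, k, n):
        return k * log(n) - 2.0 * ln_l

    def aicc(ln_l, k, n):
        return 2.0 * k - 2.0 * ln_l + 2.0 * k * (k + 1) / (n - k - 1)

    # Reproduces the deltas pinned down in the tests above.
    assert abs((bic(-176.4, 2, 100) - bic(-173.0, 3, 100)) - 2.195) < 1e-2
    assert abs((aicc(-3.54, 6, 121) - aicc(-4.17, 5, 121)) - 0.95) < 1e-2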
import numpy as np import pytest from numpy.testing import assert_allclose, assert_equal from astropy import units as u from astropy.stats.circstats import ( _length, circcorrcoef, circmean, circmoment, circvar, rayleightest, vonmisesmle, vtest, ) from astropy.utils.compat.optional_deps import HAS_SCIPY def test__length(): # testing against R CircStats package # Ref. [1] pages 6 and 125 weights = np.array([12, 1, 6, 1, 2, 1, 1]) answer = 0.766282 data = np.array([0, 3.6, 36, 72, 108, 169.2, 324]) * u.deg assert_allclose(answer, _length(data, weights=weights), atol=1e-4) def test_circmean(): # testing against R CircStats package # Ref[1], page 23 data = np.array([51, 67, 40, 109, 31, 358]) * u.deg answer = 48.63 * u.deg assert_equal(answer, np.around(circmean(data), 2)) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_circmean_against_scipy(): import scipy.stats # testing against scipy.stats.circmean function # the data is the same as the test before, but in radians data = np.array( [0.89011792, 1.1693706, 0.6981317, 1.90240888, 0.54105207, 6.24827872] ) answer = scipy.stats.circmean(data) assert_equal(np.around(answer, 2), np.around(circmean(data), 2)) def test_circvar(): # testing against R CircStats package # Ref[1], page 23 data = np.array([51, 67, 40, 109, 31, 358]) * u.deg answer = 0.1635635 assert_allclose(answer, circvar(data), atol=1e-4) def test_circmoment(): # testing against R CircStats package # Ref[1], page 23 data = np.array([51, 67, 40, 109, 31, 358]) * u.deg # 2nd, 3rd, and 4th moments # this is the answer given in Ref[1] in radians answer = np.array([1.588121, 1.963919, 2.685556]) answer = np.around(np.rad2deg(answer) * u.deg, 4) result = ( np.around(circmoment(data, p=2)[0], 4), np.around(circmoment(data, p=3)[0], 4), np.around(circmoment(data, p=4)[0], 4), ) assert_equal(answer[0], result[0]) assert_equal(answer[1], result[1]) assert_equal(answer[2], result[2]) # testing lengths answer = np.array([0.4800428, 0.236541, 0.2255761]) assert_allclose( answer, (circmoment(data, p=2)[1], circmoment(data, p=3)[1], circmoment(data, p=4)[1]), atol=1e-4, ) def test_circcorrcoef(): # testing against R CircStats package # Ref[1], page 180 # fmt: off alpha = np.array( [ 356, 97, 211, 232, 343, 292, 157, 302, 335, 302, 324, 85, 324, 340, 157, 238, 254, 146, 232, 122, 329, ] ) * u.deg beta = np.array( [ 119, 162, 221, 259, 270, 29, 97, 292, 40, 313, 94, 45, 47, 108, 221, 270, 119, 248, 270, 45, 23, ] ) * u.deg # fmt: on answer = 0.2704648 assert_allclose(answer, circcorrcoef(alpha, beta), atol=1e-4) def test_rayleightest(): # testing against R CircStats package data = np.array([190.18, 175.48, 155.95, 217.83, 156.36]) * u.deg # answer was obtained through R CircStats function r.test(x) answer = (0.00640418, 0.9202565) result = (rayleightest(data), _length(data)) assert_allclose(answer[0], result[0], atol=1e-4) assert_allclose(answer[1], result[1], atol=1e-4) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_vtest(): # testing against R CircStats package data = np.array([190.18, 175.48, 155.95, 217.83, 156.36]) * u.deg # answer was obtained through R CircStats function v0.test(x) answer = 0.9994725 assert_allclose(answer, vtest(data), atol=1e-5) def test_vonmisesmle(): # testing against R CircStats package # testing non-Quantity # fmt: off data = np.array( [ 3.3699057, 4.0411630, 0.5014477, 2.6223103, 3.7336524, 1.8136389, 4.1566039, 2.7806317, 2.4672173, 2.8493644, ] ) # fmt: on # answer was obtained through R CircStats function vm.ml(x) answer = 
(3.006514, 1.474132) assert_allclose(answer[0], vonmisesmle(data)[0], atol=1e-5) assert_allclose(answer[1], vonmisesmle(data)[1], atol=1e-5) # testing with Quantity data = np.rad2deg(data) * u.deg answer = np.rad2deg(3.006514) * u.deg assert_equal(np.around(answer, 3), np.around(vonmisesmle(data)[0], 3))
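def _sketch_circmean_by_hand():
    # Not a test: the circular mean reduces to the atan2 of the mean sine and
    # mean cosine; this sketch reproduces the 48.63 deg value pinned down in
    # test_circmean above without calling astropy.
    data = np.deg2rad([51, 67, 40, 109, 31, 358])
    mean_angle = np.arctan2(np.mean(np.sin(data)), np.mean(np.cos(data)))
    assert abs(np.rad2deg(mean_angle) - 48.63) < 0.01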
import numpy as np import pytest from numpy.testing import assert_allclose from astropy.stats.spatial import RipleysKEstimator from astropy.utils.misc import NumpyRNGContext a = np.array([[1, 4], [2, 5], [3, 6]]) b = np.array([[-1, 1], [-2, 2], [-3, 3]]) @pytest.mark.parametrize("points, x_min, x_max", [(a, 0, 10), (b, -5, 5)]) def test_ripley_K_implementation(points, x_min, x_max): """ Test against Ripley's K function implemented in R package `spatstat` +-+---------+---------+----------+---------+-+ 6 + * + | | | | 5.5 + + | | | | 5 + * + | | 4.5 + + | | | | 4 + * + +-+---------+---------+----------+---------+-+ 1 1.5 2 2.5 3 +-+---------+---------+----------+---------+-+ 3 + * + | | | | 2.5 + + | | | | 2 + * + | | 1.5 + + | | | | 1 + * + +-+---------+---------+----------+---------+-+ -3 -2.5 -2 -1.5 -1 """ area = 100 r = np.linspace(0, 2.5, 5) Kest = RipleysKEstimator( area=area, x_min=x_min, y_min=x_min, x_max=x_max, y_max=x_max ) ANS_NONE = np.array([0, 0, 0, 66.667, 66.667]) assert_allclose(ANS_NONE, Kest(data=points, radii=r, mode="none"), atol=1e-3) ANS_TRANS = np.array([0, 0, 0, 82.304, 82.304]) assert_allclose( ANS_TRANS, Kest(data=points, radii=r, mode="translation"), atol=1e-3 ) with NumpyRNGContext(123): a = np.random.uniform(low=5, high=10, size=(100, 2)) b = np.random.uniform(low=-5, high=-10, size=(100, 2)) @pytest.mark.parametrize("points", [a, b]) def test_ripley_uniform_property(points): # Ripley's K function without edge-correction converges to the area when # the number of points and the argument radii are large enough, i.e., # K(x) --> area as x --> inf area = 50 Kest = RipleysKEstimator(area=area) r = np.linspace(0, 20, 5) assert_allclose(area, Kest(data=points, radii=r, mode="none")[4]) with NumpyRNGContext(123): a = np.random.uniform(low=0, high=1, size=(500, 2)) b = np.random.uniform(low=-1, high=0, size=(500, 2)) @pytest.mark.parametrize("points, low, high", [(a, 0, 1), (b, -1, 0)]) def test_ripley_large_density(points, low, high): Kest = RipleysKEstimator(area=1, x_min=low, x_max=high, y_min=low, y_max=high) r = np.linspace(0, 0.25, 25) Kpos = Kest.poisson(r) modes = ["ohser", "translation", "ripley"] for m in modes: Kest_r = Kest(data=points, radii=r, mode=m) assert_allclose(Kpos, Kest_r, atol=1e-1) with NumpyRNGContext(123): a = np.random.uniform(low=5, high=10, size=(500, 2)) b = np.random.uniform(low=-10, high=-5, size=(500, 2)) @pytest.mark.parametrize("points, low, high", [(a, 5, 10), (b, -10, -5)]) def test_ripley_modes(points, low, high): Kest = RipleysKEstimator(area=25, x_max=high, y_max=high, x_min=low, y_min=low) r = np.linspace(0, 1.2, 25) Kpos_mean = np.mean(Kest.poisson(r)) modes = ["ohser", "translation", "ripley"] for m in modes: Kest_mean = np.mean(Kest(data=points, radii=r, mode=m)) assert_allclose(Kpos_mean, Kest_mean, atol=1e-1, rtol=1e-1) with NumpyRNGContext(123): a = np.random.uniform(low=0, high=1, size=(50, 2)) b = np.random.uniform(low=-1, high=0, size=(50, 2)) @pytest.mark.parametrize("points, low, high", [(a, 0, 1), (b, -1, 0)]) def test_ripley_large_density_var_width(points, low, high): Kest = RipleysKEstimator(area=1, x_min=low, x_max=high, y_min=low, y_max=high) r = np.linspace(0, 0.25, 25) Kpos = Kest.poisson(r) Kest_r = Kest(data=points, radii=r, mode="var-width") assert_allclose(Kpos, Kest_r, atol=1e-1) with NumpyRNGContext(123): a = np.random.uniform(low=5, high=10, size=(50, 2)) b = np.random.uniform(low=-10, high=-5, size=(50, 2)) @pytest.mark.parametrize("points, low, high", [(a, 5, 10), (b, -10, -5)]) def 
test_ripley_var_width(points, low, high): Kest = RipleysKEstimator(area=25, x_max=high, y_max=high, x_min=low, y_min=low) r = np.linspace(0, 1.2, 25) Kest_ohser = np.mean(Kest(data=points, radii=r, mode="ohser")) Kest_var_width = np.mean(Kest(data=points, radii=r, mode="var-width")) assert_allclose(Kest_ohser, Kest_var_width, atol=1e-1, rtol=1e-1)
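def _sketch_poisson_baseline():
    # Not a test: under complete spatial randomness Ripley's K is pi * r**2;
    # Kest.poisson(r) evaluates exactly this baseline, which is what the
    # estimators in the tests above are compared against.
    kest = RipleysKEstimator(area=1, x_min=0, x_max=1, y_min=0, y_max=1)
    r = np.linspace(0, 0.25, 5)
    assert_allclose(kest.poisson(r), np.pi * r**2)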
# Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np import pytest from numpy.testing import assert_allclose from astropy.stats import RegularEvents, bayesian_blocks def test_single_change_point(rseed=0): rng = np.random.default_rng(rseed) x = np.concatenate([rng.random(100), 1 + rng.random(200)]) bins = bayesian_blocks(x) assert len(bins) == 3 assert_allclose(bins[1], 0.927289, rtol=0.02) def test_duplicate_events(rseed=0): rng = np.random.default_rng(rseed) t = rng.random(100) t[80:] = t[:20] # Using int array as a regression test for gh-6877 x = np.ones(t.shape, dtype=int) x[:20] += 1 bins1 = bayesian_blocks(t) bins2 = bayesian_blocks(t[:80], x[:80]) assert_allclose(bins1, bins2) def test_measures_fitness_homoscedastic(rseed=0): rng = np.random.default_rng(rseed) t = np.linspace(0, 1, 11) x = np.exp(-0.5 * (t - 0.5) ** 2 / 0.01**2) sigma = 0.05 x = x + sigma * rng.standard_normal(len(x)) bins = bayesian_blocks(t, x, sigma, fitness="measures") assert_allclose(bins, [0, 0.45, 0.55, 1]) def test_measures_fitness_heteroscedastic(): rng = np.random.default_rng(1) t = np.linspace(0, 1, 11) x = np.exp(-0.5 * (t - 0.5) ** 2 / 0.01**2) sigma = 0.02 + 0.02 * rng.random(len(x)) x = x + sigma * rng.standard_normal(len(x)) bins = bayesian_blocks(t, x, sigma, fitness="measures") assert_allclose(bins, [0, 0.45, 0.55, 1]) def test_regular_events(): rng = np.random.default_rng(1234) dt = 0.01 steps = np.concatenate( [np.unique(rng.integers(0, 500, 100)), np.unique(rng.integers(500, 1000, 200))] ) t = dt * steps # string fitness bins1 = bayesian_blocks(t, fitness="regular_events", dt=dt) assert len(bins1) == 3 assert_allclose(bins1[1], 5, rtol=0.05) # class name fitness bins2 = bayesian_blocks(t, fitness=RegularEvents, dt=dt) assert_allclose(bins1, bins2) # class instance fitness bins3 = bayesian_blocks(t, fitness=RegularEvents(dt=dt)) assert_allclose(bins1, bins3) def test_errors(): rng = np.random.default_rng(0) t = rng.random(100) # x must be integer or None for events with pytest.raises(ValueError): bayesian_blocks(t, fitness="events", x=t) # x must be binary for regular events with pytest.raises(ValueError): bayesian_blocks(t, fitness="regular_events", x=10 * t, dt=1) # x must be specified for measures with pytest.raises(ValueError): bayesian_blocks(t, fitness="measures") # sigma cannot be specified without x with pytest.raises(ValueError): bayesian_blocks(t, fitness="events", sigma=0.5) # length of x must match length of t with pytest.raises(ValueError): bayesian_blocks(t, fitness="measures", x=t[:-1]) # repeated values in t fail when x is specified t2 = t.copy() t2[1] = t2[0] with pytest.raises(ValueError): bayesian_blocks(t2, fitness="measures", x=t) # sigma must be broadcastable with x with pytest.raises(ValueError): bayesian_blocks(t, fitness="measures", x=t, sigma=t[:-1]) def test_fitness_function_results(): """Test results for several fitness functions""" rng = np.random.default_rng(42) # Event Data t = rng.standard_normal(100) edges = bayesian_blocks(t, fitness="events") assert_allclose(edges, [-1.95103519, -1.01861547, 0.95442154, 2.1416476]) # Event data with repeats t[80:] = t[:20] edges = bayesian_blocks(t, fitness="events", p0=0.01) assert_allclose(edges, [-1.95103519, -1.08663566, 1.17575682, 2.1416476]) # Regular event data dt = 0.01 t = dt * np.arange(1000) x = np.zeros(len(t)) N = len(t) // 10 x[rng.integers(0, len(t), N)] = 1 x[rng.integers(0, len(t) // 2, N)] = 1 edges = bayesian_blocks(t, x, fitness="regular_events", dt=dt) assert_allclose(edges, [0, 
4.365, 4.995, 9.99]) # Measured point data with errors t = 100 * rng.random(20) x = np.exp(-0.5 * (t - 50) ** 2) sigma = 0.1 x_obs = x + sigma * rng.standard_normal(len(x)) edges = bayesian_blocks(t, x_obs, sigma, fitness="measures") expected = [1.39362877, 44.30811196, 49.46626158, 54.37232704, 92.7562551] assert_allclose(edges, expected) # Optional arguments are passed (p0) p0_sel = 0.05 edges = bayesian_blocks(t, x_obs, sigma, fitness="measures", p0=p0_sel) assert_allclose(edges, expected) # Optional arguments are passed (ncp_prior) ncp_prior_sel = 4 - np.log(73.53 * p0_sel * (len(t) ** -0.478)) edges = bayesian_blocks( t, x_obs, sigma, fitness="measures", ncp_prior=ncp_prior_sel ) assert_allclose(edges, expected) # Optional arguments are passed (gamma) gamma_sel = np.exp(-ncp_prior_sel) edges = bayesian_blocks(t, x_obs, sigma, fitness="measures", gamma=gamma_sel) assert_allclose(edges, expected) def test_zero_change_points(rseed=0): """ Ensure that edges contains both endpoints when there are no change points """ np.random.seed(rseed) # Using the failed edge case from # https://github.com/astropy/astropy/issues/8558 values = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2]) bins = bayesian_blocks(values) assert values.min() == bins[0] assert values.max() == bins[-1]
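def _sketch_edges_as_bins():
    # Not a test: a minimal usage sketch (setup mirrors
    # test_single_change_point above). The edges returned by bayesian_blocks
    # are ordinary bin edges spanning the data, so they can be fed directly to
    # np.histogram.
    rng = np.random.default_rng(0)
    t = np.concatenate([rng.random(100), 1 + rng.random(200)])
    edges = bayesian_blocks(t)
    counts, _ = np.histogram(t, bins=edges)
    assert counts.sum() == len(t)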
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_equal

from astropy import units as u
from astropy.stats import funcs
from astropy.utils.compat.optional_deps import HAS_MPMATH, HAS_SCIPY
from astropy.utils.misc import NumpyRNGContext


def test_median_absolute_deviation():
    with NumpyRNGContext(12345):
        # test that it runs
        randvar = np.random.randn(10000)
        mad = funcs.median_absolute_deviation(randvar)

        # test whether an array is returned if an axis is used
        randvar = randvar.reshape((10, 1000))
        mad = funcs.median_absolute_deviation(randvar, axis=1)
        assert len(mad) == 10
        assert mad.size < randvar.size
        mad = funcs.median_absolute_deviation(randvar, axis=0)
        assert len(mad) == 1000
        assert mad.size < randvar.size

        # Test some actual values in a 3 dimensional array
        x = np.arange(3 * 4 * 5)
        a = np.array([sum(x[: i + 1]) for i in range(len(x))]).reshape(3, 4, 5)
        mad = funcs.median_absolute_deviation(a)
        assert mad == 389.5
        mad = funcs.median_absolute_deviation(a, axis=0)
        assert_allclose(
            mad,
            [
                [210.0, 230.0, 250.0, 270.0, 290.0],
                [310.0, 330.0, 350.0, 370.0, 390.0],
                [410.0, 430.0, 450.0, 470.0, 490.0],
                [510.0, 530.0, 550.0, 570.0, 590.0],
            ],
        )
        mad = funcs.median_absolute_deviation(a, axis=1)
        assert_allclose(
            mad,
            [
                [27.5, 32.5, 37.5, 42.5, 47.5],
                [127.5, 132.5, 137.5, 142.5, 147.5],
                [227.5, 232.5, 237.5, 242.5, 247.5],
            ],
        )
        mad = funcs.median_absolute_deviation(a, axis=2)
        assert_allclose(
            mad,
            [
                [3.0, 8.0, 13.0, 18.0],
                [23.0, 28.0, 33.0, 38.0],
                [43.0, 48.0, 53.0, 58.0],
            ],
        )


def test_median_absolute_deviation_masked():
    # Based on the changes introduced in #4658

    # normal masked arrays without masked values are handled like normal
    # numpy arrays
    array = np.ma.array([1, 2, 3])
    assert funcs.median_absolute_deviation(array) == 1

    # masked numpy arrays return something different (rank 0 masked array)
    # but one can still compare it without np.all!
    array = np.ma.array([1, 4, 3], mask=[0, 1, 0])
    assert funcs.median_absolute_deviation(array) == 1
    # Just cross check if that's identical to the function on the unmasked
    # values only
    assert funcs.median_absolute_deviation(array) == (
        funcs.median_absolute_deviation(array[~array.mask])
    )

    # Multidimensional masked array
    array = np.ma.array([[1, 4], [2, 2]], mask=[[1, 0], [0, 0]])
    funcs.median_absolute_deviation(array)
    assert funcs.median_absolute_deviation(array) == 0
    # Just to compare it with the data without mask:
    assert funcs.median_absolute_deviation(array.data) == 0.5

    # And check if they are also broadcasted correctly
    np.testing.assert_array_equal(
        funcs.median_absolute_deviation(array, axis=0).data, [0, 1]
    )
    np.testing.assert_array_equal(
        funcs.median_absolute_deviation(array, axis=1).data, [0, 0]
    )


def test_median_absolute_deviation_nans():
    array = np.array([[1, 4, 3, np.nan], [2, 5, np.nan, 4]])
    assert_equal(
        funcs.median_absolute_deviation(array, func=np.nanmedian, axis=1), [1, 1]
    )

    array = np.ma.masked_invalid(array)
    assert funcs.median_absolute_deviation(array) == 1


def test_median_absolute_deviation_nans_masked():
    """
    Regression test to ensure ignore_nan=True gives the same results for
    ndarray and masked arrays that contain +/-inf.
    """
    data1 = np.array([1.0, np.nan, 2, np.inf])
    data2 = np.ma.masked_array(data1, mask=False)
    mad1 = funcs.median_absolute_deviation(data1, ignore_nan=True)
    mad2 = funcs.median_absolute_deviation(data2, ignore_nan=True)
    assert_equal(mad1, mad2)

    # ensure that input masked array is not modified
    assert np.isnan(data2[1])


def test_median_absolute_deviation_multidim_axis():
    array = np.ones((5, 4, 3)) * np.arange(5)[:, np.newaxis, np.newaxis]
    mad1 = funcs.median_absolute_deviation(array, axis=(1, 2))
    mad2 = funcs.median_absolute_deviation(array, axis=(2, 1))
    assert_equal(mad1, np.zeros(5))
    assert_equal(mad1, mad2)


def test_median_absolute_deviation_quantity():
    # Based on the changes introduced in #4658

    # Just a small test that this function accepts Quantities and returns a
    # quantity
    a = np.array([1, 16, 5]) * u.m
    mad = funcs.median_absolute_deviation(a)
    # Check for the correct unit and that the result is identical to the
    # result without units.
    assert mad.unit == a.unit
    assert mad.value == funcs.median_absolute_deviation(a.value)


@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_binom_conf_interval():
    # Test Wilson and Jeffreys intervals for corner cases:
    # k = 0, k = n, confidence_level = 0., and confidence_level = 1.
    n = 5
    k = [0, 4, 5]
    for conf in [0.0, 0.5, 1.0]:
        res = funcs.binom_conf_interval(k, n, confidence_level=conf, interval="wilson")
        assert ((res >= 0.0) & (res <= 1.0)).all()
        res = funcs.binom_conf_interval(
            k, n, confidence_level=conf, interval="jeffreys"
        )
        assert ((res >= 0.0) & (res <= 1.0)).all()

    # Test Jeffreys interval accuracy against table in Brown et al. (2001).
    # (See `binom_conf_interval` docstring for reference.)
    k = [0, 1, 2, 3, 4]
    n = 7
    conf = 0.95
    result = funcs.binom_conf_interval(k, n, confidence_level=conf, interval="jeffreys")
    table = np.array(
        [[0.000, 0.016, 0.065, 0.139, 0.234], [0.292, 0.501, 0.648, 0.766, 0.861]]
    )
    assert_allclose(result, table, atol=1.0e-3, rtol=0.0)

    # Test scalar version
    result = np.array(
        [
            funcs.binom_conf_interval(
                kval, n, confidence_level=conf, interval="jeffreys"
            )
            for kval in k
        ]
    ).transpose()
    assert_allclose(result, table, atol=1.0e-3, rtol=0.0)

    # Test flat interval
    result = funcs.binom_conf_interval(k, n, confidence_level=conf, interval="flat")
    table = np.array(
        [
            [0.0, 0.03185, 0.08523, 0.15701, 0.24486],
            [0.36941, 0.52650, 0.65085, 0.75513, 0.84298],
        ]
    )
    assert_allclose(result, table, atol=1.0e-3, rtol=0.0)

    # Test Wald interval
    result = funcs.binom_conf_interval(0, 5, interval="wald")
    assert_allclose(result, 0.0)  # conf interval is [0, 0] when k = 0
    result = funcs.binom_conf_interval(5, 5, interval="wald")
    assert_allclose(result, 1.0)  # conf interval is [1, 1] when k = n

    result = funcs.binom_conf_interval(
        500, 1000, confidence_level=0.68269, interval="wald"
    )
    assert_allclose(result[0], 0.5 - 0.5 / np.sqrt(1000.0))
    assert_allclose(result[1], 0.5 + 0.5 / np.sqrt(1000.0))

    # Test shapes
    k = 3
    n = 7
    for interval in ["wald", "wilson", "jeffreys", "flat"]:
        result = funcs.binom_conf_interval(k, n, interval=interval)
        assert result.shape == (2,)

    k = np.array(k)
    for interval in ["wald", "wilson", "jeffreys", "flat"]:
        result = funcs.binom_conf_interval(k, n, interval=interval)
        assert result.shape == (2,)

    n = np.array(n)
    for interval in ["wald", "wilson", "jeffreys", "flat"]:
        result = funcs.binom_conf_interval(k, n, interval=interval)
        assert result.shape == (2,)

    k = np.array([1, 3, 5])
    for interval in ["wald", "wilson", "jeffreys", "flat"]:
        result = funcs.binom_conf_interval(k, n, interval=interval)
        assert result.shape == 
(2, 3) n = np.array([5, 5, 5]) for interval in ["wald", "wilson", "jeffreys", "flat"]: result = funcs.binom_conf_interval(k, n, interval=interval) assert result.shape == (2, 3) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_binned_binom_proportion(): # Check that it works. nbins = 20 x = np.linspace(0.0, 10.0, 100) # Guarantee an `x` in every bin. success = np.ones(len(x), dtype=bool) bin_ctr, bin_hw, p, perr = funcs.binned_binom_proportion(x, success, bins=nbins) # Check shape of outputs assert bin_ctr.shape == (nbins,) assert bin_hw.shape == (nbins,) assert p.shape == (nbins,) assert perr.shape == (2, nbins) # Check that p is 1 in all bins, since success = True for all `x`. assert (p == 1.0).all() # Check that p is 0 in all bins if success = False for all `x`. success[:] = False bin_ctr, bin_hw, p, perr = funcs.binned_binom_proportion(x, success, bins=nbins) assert (p == 0.0).all() def test_binned_binom_proportion_exception(): with pytest.raises(ValueError): funcs.binned_binom_proportion([0], [1, 2], confidence_level=0.75) def test_signal_to_noise_oir_ccd(): result = funcs.signal_to_noise_oir_ccd(1, 25, 0, 0, 0, 1) assert 5.0 == result # check to make sure gain works result = funcs.signal_to_noise_oir_ccd(1, 5, 0, 0, 0, 1, 5) assert 5.0 == result # now add in sky, dark current, and read noise # make sure the snr goes down result = funcs.signal_to_noise_oir_ccd(1, 25, 1, 0, 0, 1) assert result < 5.0 result = funcs.signal_to_noise_oir_ccd(1, 25, 0, 1, 0, 1) assert result < 5.0 result = funcs.signal_to_noise_oir_ccd(1, 25, 0, 0, 1, 1) assert result < 5.0 # make sure snr increases with time result = funcs.signal_to_noise_oir_ccd(2, 25, 0, 0, 0, 1) assert result > 5.0 def test_bootstrap(): bootarr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) # test general bootstrapping answer = np.array([[7, 4, 8, 5, 7, 0, 3, 7, 8, 5], [4, 8, 8, 3, 6, 5, 2, 8, 6, 2]]) with NumpyRNGContext(42): assert_equal(answer, funcs.bootstrap(bootarr, 2)) # test with a bootfunction with NumpyRNGContext(42): bootresult = np.mean(funcs.bootstrap(bootarr, 10000, bootfunc=np.mean)) assert_allclose(np.mean(bootarr), bootresult, atol=0.01) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_bootstrap_multiple_outputs(): from scipy.stats import spearmanr # test a bootfunc with several output values # return just bootstrapping with one output from bootfunc with NumpyRNGContext(42): bootarr = np.array( [[1, 2, 3, 4, 5, 6, 7, 8, 9, 0], [4, 8, 8, 3, 6, 5, 2, 8, 6, 2]] ).T answer = np.array((0.19425, 0.02094)) def bootfunc(x): return spearmanr(x)[0] bootresult = funcs.bootstrap(bootarr, 2, bootfunc=bootfunc) assert_allclose(answer, bootresult, atol=1e-3) # test a bootfunc with several output values # return just bootstrapping with the second output from bootfunc with NumpyRNGContext(42): bootarr = np.array( [[1, 2, 3, 4, 5, 6, 7, 8, 9, 0], [4, 8, 8, 3, 6, 5, 2, 8, 6, 2]] ).T answer = np.array((0.5907, 0.9541)) def bootfunc(x): return spearmanr(x)[1] bootresult = funcs.bootstrap(bootarr, 2, bootfunc=bootfunc) assert_allclose(answer, bootresult, atol=1e-3) # return just bootstrapping with two outputs from bootfunc with NumpyRNGContext(42): answer = np.array(((0.1942, 0.5907), (0.0209, 0.9541), (0.4286, 0.2165))) def bootfunc(x): return spearmanr(x) bootresult = funcs.bootstrap(bootarr, 3, bootfunc=bootfunc) assert bootresult.shape == (3, 2) assert_allclose(answer, bootresult, atol=1e-3) def test_mad_std(): with NumpyRNGContext(12345): data = np.random.normal(5, 2, size=(100, 100)) 
assert_allclose(funcs.mad_std(data), 2.0, rtol=0.05) def test_mad_std_scalar_return(): with NumpyRNGContext(12345): data = np.random.normal(5, 2, size=(10, 10)) # make a masked array with no masked points data = np.ma.masked_where(np.isnan(data), data) rslt = funcs.mad_std(data) # want a scalar result, NOT a masked array assert np.isscalar(rslt) data[5, 5] = np.nan rslt = funcs.mad_std(data, ignore_nan=True) assert np.isscalar(rslt) rslt = funcs.mad_std(data) assert np.isscalar(rslt) assert np.isnan(rslt) def test_mad_std_warns(): with NumpyRNGContext(12345): data = np.random.normal(5, 2, size=(10, 10)) data[5, 5] = np.nan rslt = funcs.mad_std(data, ignore_nan=False) assert np.isnan(rslt) @pytest.mark.filterwarnings("ignore:Invalid value encountered in median") def test_mad_std_withnan(): with NumpyRNGContext(12345): data = np.empty([102, 102]) data[:] = np.nan data[1:-1, 1:-1] = np.random.normal(5, 2, size=(100, 100)) assert_allclose(funcs.mad_std(data, ignore_nan=True), 2.0, rtol=0.05) assert np.isnan(funcs.mad_std([1, 2, 3, 4, 5, np.nan])) assert_allclose( funcs.mad_std([1, 2, 3, 4, 5, np.nan], ignore_nan=True), 1.482602218505602 ) def test_mad_std_with_axis(): data = np.array([[1, 2, 3, 4], [4, 3, 2, 1]]) # results follow data symmetry result_axis0 = np.array([2.22390333, 0.74130111, 0.74130111, 2.22390333]) result_axis1 = np.array([1.48260222, 1.48260222]) assert_allclose(funcs.mad_std(data, axis=0), result_axis0) assert_allclose(funcs.mad_std(data, axis=1), result_axis1) def test_mad_std_with_axis_and_nan(): data = np.array([[1, 2, 3, 4, np.nan], [4, 3, 2, 1, np.nan]]) # results follow data symmetry result_axis0 = np.array([2.22390333, 0.74130111, 0.74130111, 2.22390333, np.nan]) result_axis1 = np.array([1.48260222, 1.48260222]) with pytest.warns(RuntimeWarning, match=r"All-NaN slice encountered"): assert_allclose(funcs.mad_std(data, axis=0, ignore_nan=True), result_axis0) assert_allclose(funcs.mad_std(data, axis=1, ignore_nan=True), result_axis1) def test_mad_std_with_axis_and_nan_array_type(): # mad_std should return a masked array if given one, and not otherwise data = np.array([[1, 2, 3, 4, np.nan], [4, 3, 2, 1, np.nan]]) with pytest.warns(RuntimeWarning, match=r"All-NaN slice encountered"): result = funcs.mad_std(data, axis=0, ignore_nan=True) assert not np.ma.isMaskedArray(result) data = np.ma.masked_where(np.isnan(data), data) result = funcs.mad_std(data, axis=0, ignore_nan=True) assert np.ma.isMaskedArray(result) def test_gaussian_fwhm_to_sigma(): fwhm = 2.0 * np.sqrt(2.0 * np.log(2.0)) assert_allclose(funcs.gaussian_fwhm_to_sigma * fwhm, 1.0, rtol=1.0e-6) def test_gaussian_sigma_to_fwhm(): sigma = 1.0 / (2.0 * np.sqrt(2.0 * np.log(2.0))) assert_allclose(funcs.gaussian_sigma_to_fwhm * sigma, 1.0, rtol=1.0e-6) def test_gaussian_sigma_to_fwhm_to_sigma(): assert_allclose(funcs.gaussian_fwhm_to_sigma * funcs.gaussian_sigma_to_fwhm, 1.0) def test_poisson_conf_interval_rootn(): assert_allclose(funcs.poisson_conf_interval(16, interval="root-n"), (12, 20)) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.parametrize( "interval", ["root-n-0", "pearson", "sherpagehrels", "frequentist-confidence"] ) def test_poisson_conf_large(interval): n = 100 assert_allclose( funcs.poisson_conf_interval(n, interval="root-n"), funcs.poisson_conf_interval(n, interval=interval), rtol=2e-2, ) def test_poisson_conf_array_rootn0_zero(): n = np.zeros((3, 4, 5)) assert_allclose( funcs.poisson_conf_interval(n, interval="root-n-0"), funcs.poisson_conf_interval(n[0, 0, 0], 
interval="root-n-0")[ :, None, None, None ] * np.ones_like(n), ) assert not np.any(np.isnan(funcs.poisson_conf_interval(n, interval="root-n-0"))) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_poisson_conf_array_frequentist_confidence_zero(): n = np.zeros((3, 4, 5)) assert_allclose( funcs.poisson_conf_interval(n, interval="frequentist-confidence"), funcs.poisson_conf_interval(n[0, 0, 0], interval="frequentist-confidence")[ :, None, None, None ] * np.ones_like(n), ) assert not np.any(np.isnan(funcs.poisson_conf_interval(n, interval="root-n-0"))) def test_poisson_conf_list_rootn0_zero(): n = [0, 0, 0] assert_allclose( funcs.poisson_conf_interval(n, interval="root-n-0"), [[0, 0, 0], [1, 1, 1]] ) assert not np.any(np.isnan(funcs.poisson_conf_interval(n, interval="root-n-0"))) def test_poisson_conf_array_rootn0(): n = 7 * np.ones((3, 4, 5)) assert_allclose( funcs.poisson_conf_interval(n, interval="root-n-0"), funcs.poisson_conf_interval(n[0, 0, 0], interval="root-n-0")[ :, None, None, None ] * np.ones_like(n), ) n[1, 2, 3] = 0 assert not np.any(np.isnan(funcs.poisson_conf_interval(n, interval="root-n-0"))) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_poisson_conf_array_fc(): n = 7 * np.ones((3, 4, 5)) assert_allclose( funcs.poisson_conf_interval(n, interval="frequentist-confidence"), funcs.poisson_conf_interval(n[0, 0, 0], interval="frequentist-confidence")[ :, None, None, None ] * np.ones_like(n), ) n[1, 2, 3] = 0 assert not np.any( np.isnan(funcs.poisson_conf_interval(n, interval="frequentist-confidence")) ) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_poisson_conf_frequentist_confidence_gehrels(): """Test intervals against those published in Gehrels 1986""" nlh = np.array( [ (0, 0, 1.841), (1, 0.173, 3.300), (2, 0.708, 4.638), (3, 1.367, 5.918), (4, 2.086, 7.163), (5, 2.840, 8.382), (6, 3.620, 9.584), (7, 4.419, 10.77), (8, 5.232, 11.95), (9, 6.057, 13.11), (10, 6.891, 14.27), ] ) assert_allclose( funcs.poisson_conf_interval(nlh[:, 0], interval="frequentist-confidence"), nlh[:, 1:].T, rtol=0.001, atol=0.001, ) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_poisson_conf_frequentist_confidence_gehrels_2sigma(): """Test intervals against those published in Gehrels 1986 Note: I think there's a typo (transposition of digits) in Gehrels 1986, specifically for the two-sigma lower limit for 3 events; they claim 0.569 but this function returns 0.59623... 
""" nlh = np.array( [ (0, 2, 0, 3.783), (1, 2, 2.30e-2, 5.683), (2, 2, 0.230, 7.348), (3, 2, 0.596, 8.902), (4, 2, 1.058, 10.39), (5, 2, 1.583, 11.82), (6, 2, 2.153, 13.22), (7, 2, 2.758, 14.59), (8, 2, 3.391, 15.94), (9, 2, 4.046, 17.27), (10, 2, 4.719, 18.58), ] ) assert_allclose( funcs.poisson_conf_interval( nlh[:, 0], sigma=2, interval="frequentist-confidence" ).T, nlh[:, 2:], rtol=0.01, ) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_poisson_conf_frequentist_confidence_gehrels_3sigma(): """Test intervals against those published in Gehrels 1986""" nlh = np.array( [ (0, 3, 0, 6.608), (1, 3, 1.35e-3, 8.900), (2, 3, 5.29e-2, 10.87), (3, 3, 0.212, 12.68), (4, 3, 0.465, 14.39), (5, 3, 0.792, 16.03), (6, 3, 1.175, 17.62), (7, 3, 1.603, 19.17), (8, 3, 2.068, 20.69), (9, 3, 2.563, 22.18), (10, 3, 3.084, 23.64), ] ) assert_allclose( funcs.poisson_conf_interval( nlh[:, 0], sigma=3, interval="frequentist-confidence" ).T, nlh[:, 2:], rtol=0.01, verbose=True, ) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.parametrize("n", [0, 1, 2, 3, 10, 20, 100]) def test_poisson_conf_gehrels86(n): assert_allclose( funcs.poisson_conf_interval(n, interval="sherpagehrels")[1], funcs.poisson_conf_interval(n, interval="frequentist-confidence")[1], rtol=0.02, ) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_scipy_poisson_limit(): """Test that the lower-level routine gives the snae number. Test numbers are from table1 1, 3 in Kraft, Burrows and Nousek in `ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_ """ assert_allclose( funcs._scipy_kraft_burrows_nousek(5, 2.5, 0.99), (0, 10.67), rtol=1e-3 ) assert_allclose( funcs._scipy_kraft_burrows_nousek(np.int32(5.0), 2.5, 0.99), (0, 10.67), rtol=1e-3, ) assert_allclose( funcs._scipy_kraft_burrows_nousek(np.int64(5.0), 2.5, 0.99), (0, 10.67), rtol=1e-3, ) assert_allclose( funcs._scipy_kraft_burrows_nousek(5, np.float32(2.5), 0.99), (0, 10.67), rtol=1e-3, ) assert_allclose( funcs._scipy_kraft_burrows_nousek(5, np.float64(2.5), 0.99), (0, 10.67), rtol=1e-3, ) assert_allclose( funcs._scipy_kraft_burrows_nousek(5, 2.5, np.float32(0.99)), (0, 10.67), rtol=1e-3, ) assert_allclose( funcs._scipy_kraft_burrows_nousek(5, 2.5, np.float64(0.99)), (0, 10.67), rtol=1e-3, ) conf = funcs.poisson_conf_interval( [5, 6], "kraft-burrows-nousek", background=[2.5, 2.0], confidence_level=[0.99, 0.9], ) assert_allclose(conf[:, 0], (0, 10.67), rtol=1e-3) assert_allclose(conf[:, 1], (0.81, 8.99), rtol=5e-3) @pytest.mark.skipif(not HAS_MPMATH, reason="requires mpmath") def test_mpmath_poisson_limit(): assert_allclose( funcs._mpmath_kraft_burrows_nousek(1.0, 0.1, 0.99), (0.00, 6.54), rtol=5e-3 ) assert_allclose( funcs._mpmath_kraft_burrows_nousek(1.0, 0.5, 0.95), (0.00, 4.36), rtol=5e-3 ) assert_allclose( funcs._mpmath_kraft_burrows_nousek(5.0, 0.0, 0.99), (1.17, 13.32), rtol=5e-3 ) assert_allclose( funcs._mpmath_kraft_burrows_nousek(5.0, 2.5, 0.99), (0, 10.67), rtol=1e-3 ) assert_allclose( funcs._mpmath_kraft_burrows_nousek(np.int32(6), 2.0, 0.9), (0.81, 8.99), rtol=5e-3, ) assert_allclose( funcs._mpmath_kraft_burrows_nousek(np.int64(6), 2.0, 0.9), (0.81, 8.99), rtol=5e-3, ) assert_allclose( funcs._mpmath_kraft_burrows_nousek(6.0, np.float32(2.0), 0.9), (0.81, 8.99), rtol=5e-3, ) assert_allclose( funcs._mpmath_kraft_burrows_nousek(6.0, np.float64(2.0), 0.9), (0.81, 8.99), rtol=5e-3, ) assert_allclose( funcs._mpmath_kraft_burrows_nousek(6.0, 2.0, np.float32(0.9)), (0.81, 8.99), rtol=5e-3, ) 
assert_allclose( funcs._mpmath_kraft_burrows_nousek(6.0, 2.0, np.float64(0.9)), (0.81, 8.99), rtol=5e-3, ) assert_allclose( funcs._mpmath_kraft_burrows_nousek(5.0, 2.5, 0.99), (0, 10.67), rtol=1e-3 ) assert_allclose( funcs.poisson_conf_interval( n=160, background=154.543, confidence_level=0.95, interval="kraft-burrows-nousek", )[:, 0], (0, 30.30454909), ) # For this one we do not have the "true" answer from the publication, # but we want to make sure that it at least runs without error # see https://github.com/astropy/astropy/issues/9596 _ = funcs._mpmath_kraft_burrows_nousek(1000.0, 900.0, 0.9) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_poisson_conf_value_errors(): with pytest.raises(ValueError, match="Only sigma=1 supported"): funcs.poisson_conf_interval([5, 6], "root-n", sigma=2) with pytest.raises(ValueError, match="background not supported"): funcs.poisson_conf_interval([5, 6], "pearson", background=[2.5, 2.0]) with pytest.raises(ValueError, match="confidence_level not supported"): funcs.poisson_conf_interval( [5, 6], "sherpagehrels", confidence_level=[2.5, 2.0] ) with pytest.raises(ValueError, match="Invalid method"): funcs.poisson_conf_interval(1, "foo") @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_poisson_conf_kbn_value_errors(): with pytest.raises(ValueError, match="number between 0 and 1"): funcs.poisson_conf_interval( 5, "kraft-burrows-nousek", background=2.5, confidence_level=99 ) with pytest.raises(ValueError, match="Set confidence_level for method"): funcs.poisson_conf_interval(5, "kraft-burrows-nousek", background=2.5) with pytest.raises(ValueError, match="Background must be"): funcs.poisson_conf_interval( 5, "kraft-burrows-nousek", background=-2.5, confidence_level=0.99 ) with pytest.raises(TypeError, match="Number of counts must be integer"): funcs.poisson_conf_interval( 5.0, "kraft-burrows-nousek", background=2.5, confidence_level=0.99 ) with pytest.raises(TypeError, match="Number of counts must be integer"): funcs.poisson_conf_interval( [5.0, 6.0], "kraft-burrows-nousek", background=[2.5, 2.0], confidence_level=[0.99, 0.9], ) @pytest.mark.skipif(HAS_SCIPY or HAS_MPMATH, reason="requires neither scipy nor mpmath") def test_poisson_limit_nodependencies(): with pytest.raises(ImportError): funcs.poisson_conf_interval( 20, interval="kraft-burrows-nousek", background=10.0, confidence_level=0.95 ) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.parametrize("N", [10, 100, 1000, 10000]) def test_uniform(N): with NumpyRNGContext(12345): assert funcs.kuiper(np.random.random(N))[1] > 0.01 @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.parametrize( "N,M", [(100, 100), (20, 100), (100, 20), (10, 20), (5, 5), (1000, 100)] ) def test_kuiper_two_uniform(N, M): with NumpyRNGContext(12345): assert funcs.kuiper_two(np.random.random(N), np.random.random(M))[1] > 0.01 @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.parametrize( "N,M", [(100, 100), (20, 100), (100, 20), (10, 20), (5, 5), (1000, 100)] ) def test_kuiper_two_nonuniform(N, M): with NumpyRNGContext(12345): assert ( funcs.kuiper_two(np.random.random(N) ** 2, np.random.random(M) ** 2)[1] > 0.01 ) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_detect_kuiper_two_different(): with NumpyRNGContext(12345): D, f = funcs.kuiper_two(np.random.random(500) * 0.5, np.random.random(500)) assert f < 0.01 @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.parametrize( "N,M", 
[(100, 100), (20, 100), (100, 20), (10, 20), (5, 5), (1000, 100)] ) def test_fpp_kuiper_two(N, M): from scipy.stats import binom with NumpyRNGContext(12345): R = 100 fpp = 0.05 fps = 0 for i in range(R): D, f = funcs.kuiper_two(np.random.random(N), np.random.random(M)) if f < fpp: fps += 1 assert binom(R, fpp).sf(fps - 1) > 0.005 assert binom(R, fpp).cdf(fps - 1) > 0.005 @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_kuiper_false_positive_probability(): fpp = funcs.kuiper_false_positive_probability(0.5353333333333409, 1500.0) assert fpp == 0 @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_histogram(): from scipy.stats import chi2 with NumpyRNGContext(1234): a, b = 0.3, 3.14 s = np.random.uniform(a, b, 10000) % 1 b, w = funcs.fold_intervals([(a, b, 1.0 / (b - a))]) h = funcs.histogram_intervals(16, b, w) nn, bb = np.histogram(s, bins=len(h), range=(0, 1)) uu = np.sqrt(nn) nn, uu = len(h) * nn / h / len(s), len(h) * uu / h / len(s) c2 = np.sum(((nn - 1) / uu) ** 2) assert chi2(len(h)).cdf(c2) > 0.01 assert chi2(len(h)).sf(c2) > 0.01 @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.parametrize( "ii,rr", [ ((4, (0, 1), (1,)), (1, 1, 1, 1)), ((2, (0, 1), (1,)), (1, 1)), ((4, (0, 0.5, 1), (1, 1)), (1, 1, 1, 1)), ((4, (0, 0.5, 1), (1, 2)), (1, 1, 2, 2)), ((3, (0, 0.5, 1), (1, 2)), (1, 1.5, 2)), ], ) def test_histogram_intervals_known(ii, rr): with NumpyRNGContext(1234): assert_allclose(funcs.histogram_intervals(*ii), rr) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.parametrize( "N,m,p", [ pytest.param(100, 10000, 0.01, marks=pytest.mark.skip("Test too slow")), pytest.param(300, 10000, 0.001, marks=pytest.mark.skip("Test too slow")), (10, 10000, 0.001), (3, 10000, 0.001), ], ) def test_uniform_binomial(N, m, p): """Check that the false positive probability is right In particular, run m trials with N uniformly-distributed photons and check that the number of false positives is consistent with a binomial distribution. The more trials, the tighter the bounds but the longer the runtime. """ from scipy.stats import binom with NumpyRNGContext(1234): fpps = np.array([funcs.kuiper(np.random.random(N))[1] for i in range(m)]) assert (fpps >= 0).all() assert (fpps <= 1).all() low = binom(n=m, p=p).ppf(0.01) high = binom(n=m, p=p).ppf(0.99) assert low < sum(fpps < p) < high
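def _sketch_root_n_interval():
    # Not a test: the "root-n" interval is simply n +/- sqrt(n), which is the
    # identity test_poisson_conf_interval_rootn pins down above for n = 16.
    n = 16
    assert_allclose(
        funcs.poisson_conf_interval(n, interval="root-n"),
        (n - np.sqrt(n), n + np.sqrt(n)),
    )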
# Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np import pytest from numpy.testing import assert_allclose, assert_array_almost_equal_nulp, assert_equal import astropy.units as u from astropy.stats.biweight import ( biweight_location, biweight_midcorrelation, biweight_midcovariance, biweight_midvariance, biweight_scale, ) from astropy.tests.helper import assert_quantity_allclose from astropy.utils.misc import NumpyRNGContext def test_biweight_location(): with NumpyRNGContext(12345): # test that it runs randvar = np.random.randn(10000) cbl = biweight_location(randvar) assert abs(cbl - 0) < 1e-2 def test_biweight_location_constant(): cbl = biweight_location(np.ones((10, 5))) assert cbl == 1.0 def test_biweight_location_constant_axis_2d(): shape = (10, 5) data = np.ones(shape) cbl = biweight_location(data, axis=0) assert_allclose(cbl, np.ones(shape[1])) cbl = biweight_location(data, axis=1) assert_allclose(cbl, np.ones(shape[0])) val1 = 100.0 val2 = 2.0 val3 = 5.0 data = np.arange(50).reshape(10, 5) data[2] = val1 data[7] = val2 data[8] = [val3, 0.8, val3, -0.8, val3] cbl = biweight_location(data, axis=1) assert_allclose(cbl[2], val1) assert_allclose(cbl[7], val2) assert_allclose(cbl[8], val3) def test_biweight_location_constant_axis_3d(): shape = (10, 5, 2) data = np.ones(shape) cbl = biweight_location(data, axis=0) assert_allclose(cbl, np.ones((shape[1], shape[2]))) cbl = biweight_location(data, axis=1) assert_allclose(cbl, np.ones((shape[0], shape[2]))) cbl = biweight_location(data, axis=2) assert_allclose(cbl, np.ones((shape[0], shape[1]))) def test_biweight_location_small(): bw_loc = biweight_location([1, 3, 5, 500, 2]) assert_allclose(bw_loc, 2.7456117) def test_biweight_location_axis(): """Test a 2D array with the axis keyword.""" with NumpyRNGContext(12345): ny = 100 nx = 200 data = np.random.normal(5, 2, (ny, nx)) bw = biweight_location(data, axis=0) bwi = [] for i in range(nx): bwi.append(biweight_location(data[:, i])) bwi = np.array(bwi) assert_allclose(bw, bwi) bw = biweight_location(data, axis=1) bwi = [] for i in range(ny): bwi.append(biweight_location(data[i, :])) bwi = np.array(bwi) assert_allclose(bw, bwi) def test_biweight_location_axis_3d(): """Test a 3D array with the axis keyword.""" with NumpyRNGContext(12345): nz = 3 ny = 4 nx = 5 data = np.random.normal(5, 2, (nz, ny, nx)) bw = biweight_location(data, axis=0) assert bw.shape == (ny, nx) y = 0 bwi = [] for i in range(nx): bwi.append(biweight_location(data[:, y, i])) bwi = np.array(bwi) assert_allclose(bw[y], bwi) def test_biweight_location_axis_tuple(): """Test a 3D array with a tuple axis keyword.""" data = np.arange(24).reshape(2, 3, 4) data[0, 0] = 100.0 assert_equal(biweight_location(data, axis=0), biweight_location(data, axis=(0,))) assert_equal(biweight_location(data, axis=-1), biweight_location(data, axis=(2,))) assert_equal( biweight_location(data, axis=(0, 1)), biweight_location(data, axis=(1, 0)) ) assert_equal( biweight_location(data, axis=(0, 2)), biweight_location(data, axis=(0, -1)) ) assert_equal( biweight_location(data, axis=(0, 1, 2)), biweight_location(data, axis=(2, 0, 1)) ) assert_equal( biweight_location(data, axis=(0, 1, 2)), biweight_location(data, axis=None) ) @pytest.mark.filterwarnings("ignore:All-NaN slice encountered") @pytest.mark.filterwarnings("ignore:Invalid value encountered in median") def test_biweight_location_ignore_nan(): data1d = np.array([1, 3, 5, 500, 2, np.nan]) data2d = np.array([data1d, data1d]) assert np.isnan(biweight_location(data1d, 
ignore_nan=False)) biw_expected = biweight_location(data1d[:-1], ignore_nan=False) assert_equal(biweight_location(data1d, ignore_nan=True), biw_expected) assert_equal(biweight_location(data2d, axis=0, ignore_nan=True), data1d) assert_equal( biweight_location(data2d, axis=1, ignore_nan=True), [biw_expected, biw_expected] ) @pytest.mark.filterwarnings("ignore:All-NaN slice encountered") @pytest.mark.filterwarnings("ignore:Invalid value encountered in median") def test_biweight_location_nan(): data1d = np.array([1, 3, 5, 500, 2, np.nan]) all_nan = data1d.copy() all_nan[:] = np.nan data2d = np.array([data1d, data1d, all_nan]) data1d_masked = np.ma.masked_invalid(data1d) data1d_masked.data[0] = np.nan data2d_masked = np.ma.masked_invalid(data2d) assert np.isnan(biweight_location(data1d)) bw_loc = biweight_location(data1d_masked) assert not isinstance(bw_loc, np.ma.MaskedArray) assert np.isnan(biweight_location(data2d)) for axis in (0, 1): assert np.all(np.isnan(biweight_location(data2d, axis=axis))) assert isinstance( biweight_location(data2d_masked, axis=axis), np.ma.MaskedArray ) @pytest.mark.filterwarnings("ignore:All-NaN slice encountered") @pytest.mark.filterwarnings("ignore:Invalid value encountered in median") def test_biweight_location_masked(): data1d = np.array([1, 3, 5, 500, 2, np.nan]) data2d = np.array([data1d, data1d]) data1d_masked = np.ma.masked_invalid(data1d) data2d_masked = np.ma.masked_invalid(data2d) assert_equal( biweight_location(data1d, ignore_nan=True), biweight_location(data1d_masked) ) assert_equal( biweight_location(data2d, ignore_nan=True), biweight_location(data2d_masked) ) bw_loc = biweight_location(data1d_masked) assert_allclose(bw_loc, 2.7456117) assert np.isscalar(bw_loc) bw_loc = biweight_location(data2d, ignore_nan=True, axis=1) bw_loc_masked = biweight_location(data2d_masked, axis=1) assert isinstance(bw_loc_masked, np.ma.MaskedArray) assert ~np.any(bw_loc_masked.mask) # mask is all False assert_equal(bw_loc, bw_loc_masked.data) bw_loc = biweight_location(data2d, ignore_nan=True, axis=0) bw_loc_masked = biweight_location(data2d_masked, axis=0) assert_equal(bw_loc_masked.data[:-1], bw_loc[:-1]) assert bw_loc_masked.mask[-1] # last mask element is True data1d_masked.data[0] = np.nan # unmasked NaN bw_loc = biweight_location(data1d_masked) assert not isinstance(bw_loc, np.ma.MaskedArray) assert np.isscalar(bw_loc) assert np.isnan(bw_loc) assert_equal( biweight_location(data1d_masked, ignore_nan=True), biweight_location(data1d[1:], ignore_nan=True), ) # ensure that input masked array is not modified assert np.isnan(data1d_masked[0]) def test_biweight_scale(): # NOTE: biweight_scale is covered by biweight_midvariance tests data = [1, 3, 5, 500, 2] scl = biweight_scale(data) var = biweight_midvariance(data) assert_allclose(scl, np.sqrt(var)) data = np.ma.masked_invalid([1, 3, 5, 500, 2, np.nan]) data[0] = np.nan scl = biweight_scale(data, ignore_nan=True) var = biweight_midvariance(data, ignore_nan=True) assert_allclose(scl, np.sqrt(var)) def test_biweight_midvariance(): with NumpyRNGContext(12345): # test that it runs randvar = np.random.randn(10000) var = biweight_midvariance(randvar) assert_allclose(var, 1.0, rtol=0.02) def test_biweight_midvariance_small(): data = [1, 3, 5, 500, 2] var = biweight_midvariance(data) assert_allclose(var, 2.9238456) # verified with R var = biweight_midvariance(data, modify_sample_size=True) assert_allclose(var, 2.3390765) def test_biweight_midvariance_5127(): # test a regression introduced in #5127 rand = 
np.random.default_rng(12345) data = rand.normal(loc=0.0, scale=20.0, size=(100, 100)) var = biweight_midvariance(data) assert_allclose(var, 409.87135608846205) def test_biweight_midvariance_axis(): """Test a 2D array with the axis keyword.""" with NumpyRNGContext(12345): ny = 100 nx = 200 data = np.random.normal(5, 2, (ny, nx)) bw = biweight_midvariance(data, axis=0) bwi = [] for i in range(nx): bwi.append(biweight_midvariance(data[:, i])) bwi = np.array(bwi) assert_allclose(bw, bwi) bw = biweight_midvariance(data, axis=1) bwi = [] for i in range(ny): bwi.append(biweight_midvariance(data[i, :])) bwi = np.array(bwi) assert_allclose(bw, bwi) def test_biweight_midvariance_axis_3d(): """Test a 3D array with the axis keyword.""" with NumpyRNGContext(12345): nz = 3 ny = 4 nx = 5 data = np.random.normal(5, 2, (nz, ny, nx)) bw = biweight_midvariance(data, axis=0) assert bw.shape == (ny, nx) y = 0 bwi = [] for i in range(nx): bwi.append(biweight_midvariance(data[:, y, i])) bwi = np.array(bwi) assert_allclose(bw[y], bwi) @pytest.mark.filterwarnings("ignore:All-NaN slice encountered") @pytest.mark.filterwarnings("ignore:Invalid value encountered in median") def test_biweight_midvariance_ignore_nan(): data1d = np.array([1, 3, 5, 500, 2, np.nan]) data2d = np.array([data1d, data1d]) assert np.isnan(biweight_midvariance(data1d, ignore_nan=False)) biw_var = biweight_midvariance(data1d[:-1], ignore_nan=False) biw_var_nonan = biweight_midvariance(data1d, ignore_nan=True) assert_equal(biw_var_nonan, biw_var) assert_equal( biweight_midvariance(data2d, axis=0, ignore_nan=True), [0.0, 0.0, 0.0, 0.0, 0.0, np.nan], ) assert_equal( biweight_midvariance(data2d, axis=1, ignore_nan=True), [biw_var_nonan, biw_var_nonan], ) @pytest.mark.filterwarnings("ignore:All-NaN slice encountered") @pytest.mark.filterwarnings("ignore:Invalid value encountered in median") def test_biweight_scale_nan(): data1d = np.array([1, 3, 5, 500, 2, np.nan]) all_nan = data1d.copy() all_nan[:] = np.nan data2d = np.array([data1d, data1d, all_nan]) data1d_masked = np.ma.masked_invalid(data1d) data1d_masked.data[0] = np.nan data2d_masked = np.ma.masked_invalid(data2d) assert np.isnan(biweight_scale(data1d)) bw_scl = biweight_scale(data1d_masked) assert not isinstance(bw_scl, np.ma.MaskedArray) assert np.isnan(bw_scl) assert np.isnan(biweight_scale(data2d)) assert_allclose(biweight_scale(data2d_masked), 1.709926, atol=1e-5) for axis in (0, 1): assert np.all(np.isnan(biweight_scale(data2d, axis=axis))) assert isinstance(biweight_scale(data2d_masked, axis=axis), np.ma.MaskedArray) @pytest.mark.filterwarnings("ignore:All-NaN slice encountered") @pytest.mark.filterwarnings("ignore:Invalid value encountered in median") def test_biweight_midvariance_masked(): data1d = np.array([1, 3, 5, 500, 2, np.nan]) data2d = np.array([data1d, data1d]) data1d_masked = np.ma.masked_invalid(data1d) data2d_masked = np.ma.masked_invalid(data2d) assert_allclose( biweight_midvariance(data1d, ignore_nan=True), biweight_midvariance(data1d_masked), ) assert_allclose( biweight_midvariance(data2d, ignore_nan=True), biweight_midvariance(data2d_masked), ) bw_scl = biweight_midvariance(data1d_masked) assert_allclose(bw_scl, 2.9238456) assert np.isscalar(bw_scl) bw_loc = biweight_midvariance(data2d, ignore_nan=True, axis=1) bw_loc_masked = biweight_midvariance(data2d_masked, axis=1) assert isinstance(bw_loc_masked, np.ma.MaskedArray) assert ~np.any(bw_loc_masked.mask) # mask is all False assert_allclose(bw_loc, bw_loc_masked.data) bw_loc = biweight_midvariance(data2d, 
ignore_nan=True, axis=0) bw_loc_masked = biweight_midvariance(data2d_masked, axis=0) assert_allclose(bw_loc_masked.data[:-1], bw_loc[:-1]) assert bw_loc_masked.mask[-1] # last mask element is True data1d_masked.data[0] = np.nan # unmasked NaN bw_scl = biweight_midvariance(data1d_masked) assert not isinstance(bw_scl, np.ma.MaskedArray) assert np.isscalar(bw_scl) assert np.isnan(bw_scl) assert_allclose( biweight_midvariance(data1d_masked, ignore_nan=True), biweight_midvariance(data1d[1:], ignore_nan=True), ) # ensure that input masked array is not modified assert np.isnan(data1d_masked[0]) def test_biweight_scale_axis_tuple(): """Test a 3D array with a tuple axis keyword.""" data = np.arange(24).reshape(2, 3, 4) data[0, 0] = 100.0 assert_equal(biweight_scale(data, axis=0), biweight_scale(data, axis=(0,))) assert_equal(biweight_scale(data, axis=-1), biweight_scale(data, axis=(2,))) assert_equal(biweight_scale(data, axis=(0, 1)), biweight_scale(data, axis=(1, 0))) assert_equal(biweight_scale(data, axis=(0, 2)), biweight_scale(data, axis=(0, -1))) assert_equal( biweight_scale(data, axis=(0, 1, 2)), biweight_scale(data, axis=(2, 0, 1)) ) assert_equal(biweight_scale(data, axis=(0, 1, 2)), biweight_scale(data, axis=None)) assert_equal( biweight_scale(data, axis=(0, 2), modify_sample_size=True), biweight_scale(data, axis=(0, -1), modify_sample_size=True), ) def test_biweight_midvariance_constant_axis(): bw = biweight_midvariance(np.ones((10, 5))) assert bw == 0.0 def test_biweight_midvariance_constant_axis_2d(): shape = (10, 5) data = np.ones(shape) cbl = biweight_midvariance(data, axis=0) assert_allclose(cbl, np.zeros(shape[1])) cbl = biweight_midvariance(data, axis=1) assert_allclose(cbl, np.zeros(shape[0])) data = np.arange(50).reshape(10, 5) data[2] = 100.0 data[7] = 2.0 data[8] = [5.0, 0.8, 5.0, -0.8, 5.0] bw = biweight_midvariance(data, axis=1) assert_allclose(bw[2], 0.0) assert_allclose(bw[7], 0.0) assert_allclose(bw[8], 0.0) def test_biweight_midvariance_constant_axis_3d(): shape = (10, 5, 2) data = np.ones(shape) cbl = biweight_midvariance(data, axis=0) assert_allclose(cbl, np.zeros((shape[1], shape[2]))) cbl = biweight_midvariance(data, axis=1) assert_allclose(cbl, np.zeros((shape[0], shape[2]))) cbl = biweight_midvariance(data, axis=2) assert_allclose(cbl, np.zeros((shape[0], shape[1]))) def test_biweight_midcovariance_1d(): d = [0, 1, 2] cov = biweight_midcovariance(d) var = biweight_midvariance(d) assert_allclose(cov, [[var]]) def test_biweight_midcovariance_2d(): d = [[0, 1, 2], [2, 1, 0]] cov = biweight_midcovariance(d) val = 0.70121809 assert_allclose(cov, [[val, -val], [-val, val]]) # verified with R d = [[5, 1, 10], [500, 5, 2]] cov = biweight_midcovariance(d) assert_allclose( cov, [[14.54159077, -7.79026256], [-7.79026256, 6.92087252]] # verified with R ) cov = biweight_midcovariance(d, modify_sample_size=True) assert_allclose(cov, [[14.54159077, -5.19350838], [-5.19350838, 4.61391501]]) def test_biweight_midcovariance_constant(): data = np.ones((3, 10)) val3 = 5.0 data[1] = [val3, 0.8, val3, -0.8, val3, val3, val3, 1.0, val3, -0.7] cov = biweight_midcovariance(data) assert_allclose(cov, np.zeros((3, 3))) rng = np.random.default_rng(123) data = rng.random((5, 5)) val3 = 5.0 data[1] = [val3, 0.8, val3, -0.8, val3] cov = biweight_midcovariance(data) assert_allclose(cov[1, :], 0.0) assert_allclose(cov[:, 1], 0.0) def test_biweight_midcovariance_midvariance(): """ Test that biweight_midcovariance diagonal elements agree with biweight_midvariance. 
""" rng = np.random.default_rng(1) d = rng.normal(0, 2, size=(100, 3)) cov = biweight_midcovariance(d) var = [biweight_midvariance(a) for a in d] assert_allclose(cov.diagonal(), var) cov2 = biweight_midcovariance(d, modify_sample_size=True) var2 = [biweight_midvariance(a, modify_sample_size=True) for a in d] assert_allclose(cov2.diagonal(), var2) def test_midcovariance_shape(): """ Test that biweight_midcovariance raises error with a 3D array. """ d = np.ones(27).reshape(3, 3, 3) with pytest.raises(ValueError, match=r"The input array must be 2D or 1D\."): biweight_midcovariance(d) def test_midcovariance_M_shape(): """ Test that biweight_midcovariance raises error when M is not a scalar or 1D array. """ d = [0, 1, 2] M = [[0, 1], [2, 3]] with pytest.raises(ValueError, match=r"M must be a scalar or 1D array\."): biweight_midcovariance(d, M=M) def test_biweight_midcovariance_symmetric(): """ Regression test to ensure that midcovariance matrix is symmetric when ``modify_sample_size=True`` (see #5972). """ rng = np.random.default_rng(1) d = rng.gamma(2, 2, size=(3, 500)) cov = biweight_midcovariance(d) assert_array_almost_equal_nulp(cov, cov.T, nulp=5) cov = biweight_midcovariance(d, modify_sample_size=True) assert_array_almost_equal_nulp(cov, cov.T, nulp=5) def test_biweight_midcorrelation(): x = [0, 1, 2] y = [2, 1, 0] assert_allclose(biweight_midcorrelation(x, x), 1.0) assert_allclose(biweight_midcorrelation(x, y), -1.0) x = [5, 1, 10, 12.4, 13.2] y = [500, 5, 2, 7.1, 0.9] # verified with R assert_allclose(biweight_midcorrelation(x, y), -0.14411038976763313) def test_biweight_midcorrelation_inputs(): a1 = np.ones((3, 3)) a2 = np.ones(5) a3 = np.ones(7) with pytest.raises(ValueError, match=r"x must be a 1D array\."): biweight_midcorrelation(a1, a2) with pytest.raises(ValueError, match=r"y must be a 1D array\."): biweight_midcorrelation(a2, a1) with pytest.raises(ValueError, match=r"x and y must have the same shape\."): biweight_midcorrelation(a2, a3) def test_biweight_32bit_runtime_warnings(): """Regression test for #6905.""" with NumpyRNGContext(12345): data = np.random.random(100).astype(np.float32) data[50] = 30000.0 biweight_scale(data) biweight_midvariance(data) def test_biweight_scl_var_constant_units(): unit = u.km data = np.ones(10) << unit biwscl = biweight_scale(data) biwvar = biweight_midvariance(data) assert isinstance(biwscl, u.Quantity) assert isinstance(biwvar, u.Quantity) assert_quantity_allclose(biwscl, 0.0 << unit) assert_quantity_allclose(biwvar, 0.0 << unit**2) data = np.ones(10) << unit data[0] = np.nan biwscl = biweight_scale(data) biwvar = biweight_midvariance(data) assert isinstance(biwscl, u.Quantity) assert isinstance(biwvar, u.Quantity) assert_quantity_allclose(biwscl, np.nan << unit) assert_quantity_allclose(biwvar, np.nan << unit**2)
210cfcd8bdd9626f0b68a1ec3630f02de77551004196bfe0fc3f657453331076
# Licensed under a 3-clause BSD style license - see LICENSE.rst

# The LombScargle periodogram functionality has been moved to
# astropy.timeseries.periodograms.lombscargle. The purpose of this file is to
# provide backward-compatibility during a transition phase. We can't emit a
# deprecation warning simply on import of this module, since the classes are
# imported into the top-level astropy.stats, so instead we wrap the main class
# and emit a warning during initialization.
import warnings

from astropy.timeseries.periodograms.lombscargle import (
    LombScargle as TimeseriesLombScargle,
)
from astropy.utils.exceptions import AstropyDeprecationWarning

__all__ = ["LombScargle"]


class LombScargle(TimeseriesLombScargle):
    """
    Compute the Lomb-Scargle Periodogram.

    This class has been deprecated and will be removed in a future version.
    Use `astropy.timeseries.LombScargle` instead.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "Importing LombScargle from astropy.stats has been "
            "deprecated and will no longer be supported in the future. "
            "Please import this class from the astropy.timeseries "
            "module instead",
            AstropyDeprecationWarning,
        )
        super().__init__(*args, **kwargs)
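

# --- Editor's sketch (not part of the original module) ----------------------
# A minimal illustration of the behavior implemented above: importing this
# module is silent, and the deprecation warning fires only when the wrapped
# class is instantiated. The sample data below is an assumption made purely
# for illustration.
def _example_deprecation_warning():  # pragma: no cover
    import numpy as np

    t = np.linspace(0, 10, 100)
    y = np.sin(2 * np.pi * t)
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        LombScargle(t, y)
    assert any(issubclass(w.category, AstropyDeprecationWarning) for w in caught)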
1c5b8bd21fe1496707625c077d9fe7038d1872c8abfc5513ddf829a6fc9451c4
# Licensed under a 3-clause BSD style license - see LICENSE.rst

"""
``showtable`` is a command-line script based on ``astropy.io`` and
``astropy.table`` for printing ASCII, FITS, HDF5 or VOTable file(s) to the
standard output.

Example usage of ``showtable``:

1. FITS::

    $ showtable astropy/io/fits/tests/data/table.fits
     target V_mag
    ------- -----
    NGC1001  11.1
    NGC1002  12.3
    NGC1003  15.2

2. ASCII::

    $ showtable astropy/io/ascii/tests/t/simple_csv.csv
     a   b   c
    --- --- ---
      1   2   3
      4   5   6

3. XML::

    $ showtable astropy/io/votable/tests/data/names.xml --max-width 70
               col1            col2    col3  ... col15 col16 col17
               ---             deg     deg   ...  mag   mag   ---
    ------------------------- ------- ------ ... ----- ----- -----
    SSTGLMC G000.0000+00.1611  0.0000 0.1611 ...    --    --    AA

4. Print all the FITS tables in the current directory::

    $ showtable *.fits
"""

import argparse
import textwrap
import warnings

from astropy import log
from astropy.table import Table
from astropy.utils.exceptions import AstropyUserWarning


def showtable(filename, args):
    """
    Read a table and print to the standard output.

    Parameters
    ----------
    filename : str
        The path to the input file (ASCII, FITS, HDF5 or VOTable).
    """
    if args.info and args.stats:
        warnings.warn("--info and --stats cannot be used together", AstropyUserWarning)

    if any((args.max_lines, args.max_width, args.hide_unit, args.show_dtype)) and (
        args.info or args.stats
    ):
        warnings.warn(
            "print parameters are ignored if --info or --stats is used",
            AstropyUserWarning,
        )

    # these parameters are passed to Table.read if they are specified in the
    # command-line
    read_kwargs = ("hdu", "format", "table_id", "delimiter")
    kwargs = {k: v for k, v in vars(args).items() if k in read_kwargs and v is not None}
    try:
        table = Table.read(filename, **kwargs)
        if args.info:
            table.info("attributes")
        elif args.stats:
            table.info("stats")
        else:
            formatter = table.more if args.more else table.pprint
            formatter(
                max_lines=args.max_lines,
                max_width=args.max_width,
                show_unit=(False if args.hide_unit else None),
                show_dtype=(True if args.show_dtype else None),
            )
    except OSError as e:
        log.error(str(e))


def main(args=None):
    """The main function called by the `showtable` script."""
    parser = argparse.ArgumentParser(
        description=textwrap.dedent(
            """
            Print tables from ASCII, FITS, HDF5, VOTable file(s).

            The tables are read with 'astropy.table.Table.read' and are printed
            with 'astropy.table.Table.pprint'. The default behavior is to make
            the table output fit onto a single screen page. For a long and wide
            table this will mean cutting out inner rows and columns. To print
            **all** the rows or columns use ``--max-lines=-1`` or
            ``--max-width=-1``, respectively. The complete list of supported
            formats can be found at
            http://astropy.readthedocs.io/en/latest/io/unified.html#built-in-table-readers-writers
            """
        )
    )
    addarg = parser.add_argument
    addarg("filename", nargs="+", help="path to one or more files")
    addarg(
        "--format",
        help=(
            "input table format, should be specified if it "
            "cannot be automatically detected"
        ),
    )
    addarg("--more", action="store_true", help="use the pager mode from Table.more")
    addarg(
        "--info", action="store_true", help="show information about the table columns"
    )
    addarg(
        "--stats", action="store_true", help="show statistics about the table columns"
    )

    # pprint arguments
    pprint_args = parser.add_argument_group("pprint arguments")
    addarg = pprint_args.add_argument
    addarg(
        "--max-lines",
        type=int,
        help=(
            "maximum number of lines in table output (default=screen "
            "length, -1 for no limit)"
        ),
    )
    addarg(
        "--max-width",
        type=int,
        help="maximum width in table output (default=screen width, -1 for no limit)",
    )
    addarg(
        "--hide-unit",
        action="store_true",
        help=(
            "hide the header row for unit (which is shown "
            "only if one or more columns has a unit)"
        ),
    )
    addarg(
        "--show-dtype",
        action="store_true",
        help=(
            "always include a header row for column dtypes "
            "(otherwise shown only if any column is multidimensional)"
        ),
    )

    # ASCII-specific arguments
    ascii_args = parser.add_argument_group("ASCII arguments")
    addarg = ascii_args.add_argument
    addarg("--delimiter", help="column delimiter string")

    # FITS-specific arguments
    fits_args = parser.add_argument_group("FITS arguments")
    addarg = fits_args.add_argument
    addarg("--hdu", help="name of the HDU to show")

    # HDF5-specific arguments
    hdf5_args = parser.add_argument_group("HDF5 arguments")
    addarg = hdf5_args.add_argument
    addarg("--path", help="the path from which to read the table")

    # VOTable-specific arguments
    votable_args = parser.add_argument_group("VOTable arguments")
    addarg = votable_args.add_argument
    addarg("--table-id", help="the table to read in")

    args = parser.parse_args(args)

    for idx, filename in enumerate(args.filename):
        if idx > 0:
            print()
        showtable(filename, args)
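

# --- Editor's sketch (not part of the original script) -----------------------
# ``main`` accepts an explicit argument list, so the script can be driven
# programmatically as well as from the shell. The file names below are
# assumptions for illustration only; the flags are exactly those defined above.
def _example_programmatic_use():  # pragma: no cover
    # Equivalent to: showtable --info example.fits
    main(["--info", "example.fits"])
    # Print every row and column of a CSV, overriding the screen-size limits:
    main(
        ["--format", "ascii.csv", "--max-lines", "-1", "--max-width", "-1",
         "example.csv"]
    )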
a4d568a7bc87c8f0db6ecf9b3d403a4b1d746b244451e7d2497495ffb5e8595e
# This module handles the definition of mixin 'handlers' which are functions
# that given an arbitrary object (e.g. a dask array) will return an object that
# can be used as a mixin column. This is useful because it means that users can
# then add objects to tables that are not formally mixin columns and where
# adding an info attribute is beyond our control.

__all__ = ["MixinRegistryError", "register_mixin_handler", "get_mixin_handler"]

# The internal dictionary of handlers maps fully qualified names of classes
# to a function that can take an object and return a mixin-compatible object.
_handlers = {}


class MixinRegistryError(Exception):
    pass


def register_mixin_handler(fully_qualified_name, handler, force=False):
    """
    Register a mixin column 'handler'.

    A mixin column handler is a function that given an arbitrary Python object,
    will return an object with the .info attribute that can then be used as a
    mixin column (this can be e.g. a copy of the object with a new attribute,
    a subclass instance, or a wrapper class - this is left up to the handler).
    The handler will be used on classes that have an exactly matching fully
    qualified name.

    Parameters
    ----------
    fully_qualified_name : str
        The fully qualified name of the class that the handler can operate on,
        such as e.g. ``dask.array.core.Array``.
    handler : func
        The handler function.
    force : bool, optional
        Whether to overwrite any previous handler if there is already one for
        the same fully qualified name.
    """
    if fully_qualified_name not in _handlers or force:
        _handlers[fully_qualified_name] = handler
    else:
        raise MixinRegistryError(
            f"Handler for class {fully_qualified_name} is already defined"
        )


def get_mixin_handler(obj):
    """
    Given an arbitrary object, return the matching mixin handler (if any).

    Parameters
    ----------
    obj : object or str
        The object to find a mixin handler for, or a fully qualified name.

    Returns
    -------
    handler : None or func
        The matching handler, if found, or `None`.
    """
    if isinstance(obj, str):
        return _handlers.get(obj, None)
    else:
        return _handlers.get(
            obj.__class__.__module__ + "." + obj.__class__.__name__, None
        )


# Add built-in handlers to registry. Note that any third-party package imports
# required by the handlers should go inside the handler function to delay
# the imports until they are actually needed.


def dask_handler(arr):
    from astropy.table.mixins.dask import as_dask_column

    return as_dask_column(arr)


register_mixin_handler("dask.array.core.Array", dask_handler)
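

# --- Editor's sketch (not part of the original module) -----------------------
# A minimal illustration of the registration API above. The names
# ``mypkg.arrays.MyArray``, ``mypkg.table_support`` and ``as_mixin_column`` are
# hypothetical, used purely to show the shape of a handler.
def _example_register_custom_handler():  # pragma: no cover
    def my_array_handler(obj):
        # Third-party imports belong inside the handler (see the note above)
        # so they are only triggered when the handler actually runs.
        from mypkg.table_support import as_mixin_column  # hypothetical

        return as_mixin_column(obj)

    register_mixin_handler("mypkg.arrays.MyArray", my_array_handler)
    # Lookup works from either an instance or the fully qualified name string:
    assert get_mixin_handler("mypkg.arrays.MyArray") is my_array_handler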
a3476ea33d6848d2e54303b23028d0b26ff5f551b9b491a36cc54e650639b39e
import dask.array as da

from astropy.utils.data_info import ParentDtypeInfo

__all__ = ["as_dask_column"]


class DaskInfo(ParentDtypeInfo):
    @staticmethod
    def default_format(val):
        return f"{val.compute()}"


class DaskColumn(da.Array):
    info = DaskInfo()

    def copy(self):
        # Array hard-codes the resulting copied array as Array, so need to
        # overload this since Table tries to copy the array.
        return as_dask_column(self, info=self.info)

    def __getitem__(self, item):
        result = super().__getitem__(item)
        if isinstance(item, int):
            return result
        else:
            return as_dask_column(result, info=self.info)

    def insert(self, obj, values, axis=0):
        return as_dask_column(da.insert(self, obj, values, axis=axis), info=self.info)


def as_dask_column(array, info=None):
    result = DaskColumn(array.dask, array.name, array.chunks, meta=array)
    if info is not None:
        result.info = info
    return result
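

# --- Editor's sketch (not part of the original module) -----------------------
# A minimal illustration, assuming dask is installed: wrap a plain dask array
# so it gains the ``.info`` attribute that Table requires of mixin columns,
# then check that slicing stays lazy and preserves the column type.
def _example_dask_column_usage():  # pragma: no cover
    from astropy.table import Table

    col = as_dask_column(da.arange(10, chunks=5))
    t = Table({"x": col})
    # Slicing returns another DaskColumn; nothing is computed yet.
    assert isinstance(t["x"][:3], DaskColumn)
    # Integer indexing returns a lazy scalar; compute() materializes it.
    assert t["x"][2].compute() == 2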
25a18e158beba81020ca2d34e8fbdd5c09699527cff95b04ee2548acefe1317e
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import numpy as np
import pytest

from astropy.table.sorted_array import SortedArray
from astropy.table.table import Table


@pytest.fixture
def array():
    # composite index
    col0 = np.array([x % 2 for x in range(1, 11)])
    col1 = np.array([x for x in range(1, 11)])
    t = Table([col0, col1])
    t = t[t.argsort()]
    return SortedArray(t, t["col1"].copy())


@pytest.fixture
def wide_array():
    # array with 100 columns
    t = Table([[x] * 10 for x in np.arange(100)])
    return SortedArray(t, t["col0"].copy())


def test_array_find(array):
    for i in range(1, 11):
        print(f"Searching for {i}")
        assert array.find((i % 2, i)) == [i]
    assert array.find((1, 4)) == []


def test_array_range(array):
    assert np.all(array.range((0, 8), (1, 3), (True, True)) == [8, 10, 1, 3])
    assert np.all(array.range((0, 8), (1, 3), (False, True)) == [10, 1, 3])
    assert np.all(array.range((0, 8), (1, 3), (True, False)) == [8, 10, 1])


def test_wide_array(wide_array):
    # checks for a previous bug in which the length of a
    # sliced SortedArray was set to the number of columns
    # instead of the number of elements in each column
    first_row = wide_array[:1].data
    assert np.all(first_row == Table([[x] for x in np.arange(100)]))
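

# --- Editor's sketch (not an original test) ----------------------------------
# Illustrates the composite-key ordering the fixtures above rely on: rows are
# sorted by (col0, col1), so a range query restricted to the odd rows
# (col0 == 1) returns their col1 values in ascending order.
def test_array_range_single_parity_sketch(array):
    assert np.all(array.range((1, 1), (1, 5), (True, True)) == [1, 3, 5])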
c64a74bb00cbf9259e866ba50479f07056e825abaac206596653c4fe3af81749
import numpy as np from astropy.table import np_utils def test_common_dtype(): """ Test that allowed combinations are those expected. """ dtype = [ ("int", int), ("uint8", np.uint8), ("float32", np.float32), ("float64", np.float64), ("str", "S2"), ("uni", "U2"), ("bool", bool), ("object", np.object_), ] arr = np.empty(1, dtype=dtype) fail = set() succeed = set() for name1, type1 in dtype: for name2, type2 in dtype: try: np_utils.common_dtype([arr[name1], arr[name2]]) succeed.add(f"{name1} {name2}") except np_utils.TableMergeError: fail.add(f"{name1} {name2}") # known bad combinations bad = { "str int", "str bool", "uint8 bool", "uint8 str", "object float32", "bool object", "uni uint8", "int str", "bool str", "bool float64", "bool uni", "str float32", "uni float64", "uni object", "bool uint8", "object float64", "float32 bool", "str uint8", "uni bool", "float64 bool", "float64 object", "int bool", "uni int", "uint8 object", "int uni", "uint8 uni", "float32 uni", "object uni", "bool float32", "uni float32", "object str", "int object", "str float64", "object int", "float64 uni", "bool int", "object bool", "object uint8", "float32 object", "str object", "float64 str", "float32 str", } assert fail == bad good = { "float64 int", "int int", "uint8 float64", "uint8 int", "str uni", "float32 float32", "float64 float64", "float64 uint8", "float64 float32", "int uint8", "int float32", "uni str", "int float64", "uint8 float32", "float32 int", "float32 uint8", "bool bool", "uint8 uint8", "str str", "float32 float64", "object object", "uni uni", } assert succeed == good
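

# --- Editor's sketch (not an original test) -----------------------------------
# A minimal illustration of what ``common_dtype`` returns for one of the
# allowed pairs exercised above: int and float64 columns promote to float64.
def test_common_dtype_promotion_sketch():
    i = np.array([1], dtype=int)
    f = np.array([1.5], dtype=np.float64)
    assert np.dtype(np_utils.common_dtype([i, f])) == np.dtype(np.float64)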
3c4a446f73145449e3c3a20788e1c553138286717ea1ce6d52984fc52bd38bcf
# Licensed under a 3-clause BSD style license - see LICENSE.rst import copy import gc import os import pathlib import pickle import sys from collections import OrderedDict from io import StringIO import numpy as np import pytest from numpy.testing import assert_allclose, assert_array_equal from astropy import table from astropy import units as u from astropy.coordinates import SkyCoord from astropy.io import fits from astropy.table import ( Column, MaskedColumn, QTable, Table, TableAttribute, TableReplaceWarning, ) from astropy.tests.helper import assert_follows_unicode_guidelines from astropy.time import Time, TimeDelta from astropy.utils.compat import NUMPY_LT_1_25 from astropy.utils.compat.optional_deps import HAS_PANDAS from astropy.utils.data import get_pkg_data_filename from astropy.utils.exceptions import AstropyUserWarning from astropy.utils.tests.test_metadata import MetaBaseTest from .conftest import MIXIN_COLS, MaskedTable @pytest.fixture def home_is_tmpdir(monkeypatch, tmp_path): """ Pytest fixture to run a test case with tilde-prefixed paths. In the tilde-path case, environment variables are temporarily modified so that '~' resolves to the temp directory. """ # For Unix monkeypatch.setenv("HOME", str(tmp_path)) # For Windows monkeypatch.setenv("USERPROFILE", str(tmp_path)) class SetupData: def _setup(self, table_types): self._table_type = table_types.Table self._column_type = table_types.Column @property def a(self): if self._column_type is not None: if not hasattr(self, "_a"): self._a = self._column_type( [1, 2, 3], name="a", format="%d", meta={"aa": [0, 1, 2, 3, 4]} ) return self._a @property def b(self): if self._column_type is not None: if not hasattr(self, "_b"): self._b = self._column_type( [4, 5, 6], name="b", format="%d", meta={"aa": 1} ) return self._b @property def c(self): if self._column_type is not None: if not hasattr(self, "_c"): self._c = self._column_type([7, 8, 9], "c") return self._c @property def d(self): if self._column_type is not None: if not hasattr(self, "_d"): self._d = self._column_type([7, 8, 7], "d") return self._d @property def obj(self): if self._column_type is not None: if not hasattr(self, "_obj"): self._obj = self._column_type([1, "string", 3], "obj", dtype="O") return self._obj @property def t(self): if self._table_type is not None: if not hasattr(self, "_t"): self._t = self._table_type([self.a, self.b]) return self._t @pytest.mark.usefixtures("table_types") class TestSetTableColumn(SetupData): def test_set_row(self, table_types): """Set a row from a tuple of values""" self._setup(table_types) t = table_types.Table([self.a, self.b]) t[1] = (20, 21) assert t["a"][0] == 1 assert t["a"][1] == 20 assert t["a"][2] == 3 assert t["b"][0] == 4 assert t["b"][1] == 21 assert t["b"][2] == 6 def test_set_row_existing(self, table_types): """Set a row from another existing row""" self._setup(table_types) t = table_types.Table([self.a, self.b]) t[0] = t[1] assert t[0][0] == 2 assert t[0][1] == 5 def test_set_row_fail_1(self, table_types): """Set a row from an incorrectly-sized or typed set of values""" self._setup(table_types) t = table_types.Table([self.a, self.b]) with pytest.raises(ValueError): t[1] = (20, 21, 22) with pytest.raises(ValueError): t[1] = 0 def test_set_row_fail_2(self, table_types): """Set a row from an incorrectly-typed tuple of values""" self._setup(table_types) t = table_types.Table([self.a, self.b]) with pytest.raises(ValueError): t[1] = ("abc", "def") def test_set_new_col_new_table(self, table_types): """Create a new column in empty 
table using the item access syntax""" self._setup(table_types) t = table_types.Table() t["aa"] = self.a # Test that the new column name is 'aa' and that the values match assert np.all(t["aa"] == self.a) assert t.colnames == ["aa"] def test_set_new_col_new_table_quantity(self, table_types): """Create a new column (from a quantity) in empty table using the item access syntax""" self._setup(table_types) t = table_types.Table() t["aa"] = np.array([1, 2, 3]) * u.m assert np.all(t["aa"] == np.array([1, 2, 3])) assert t["aa"].unit == u.m t["bb"] = 3 * u.m assert np.all(t["bb"] == 3) assert t["bb"].unit == u.m def test_set_new_col_existing_table(self, table_types): """Create a new column in an existing table using the item access syntax""" self._setup(table_types) t = table_types.Table([self.a]) # Add a column t["bb"] = self.b assert np.all(t["bb"] == self.b) assert t.colnames == ["a", "bb"] assert t["bb"].meta == self.b.meta assert t["bb"].format == self.b.format # Add another column t["c"] = t["a"] assert np.all(t["c"] == t["a"]) assert t.colnames == ["a", "bb", "c"] assert t["c"].meta == t["a"].meta assert t["c"].format == t["a"].format # Add a multi-dimensional column t["d"] = table_types.Column(np.arange(12).reshape(3, 2, 2)) assert t["d"].shape == (3, 2, 2) assert t["d"][0, 0, 1] == 1 # Add column from a list t["e"] = ["hello", "the", "world"] assert np.all(t["e"] == np.array(["hello", "the", "world"])) # Make sure setting existing column still works t["e"] = ["world", "hello", "the"] assert np.all(t["e"] == np.array(["world", "hello", "the"])) # Add a column via broadcasting t["f"] = 10 assert np.all(t["f"] == 10) # Add a column from a Quantity t["g"] = np.array([1, 2, 3]) * u.m assert np.all(t["g"].data == np.array([1, 2, 3])) assert t["g"].unit == u.m # Add a column from a (scalar) Quantity t["g"] = 3 * u.m assert np.all(t["g"].data == 3) assert t["g"].unit == u.m def test_set_new_unmasked_col_existing_table(self, table_types): """Create a new column in an existing table using the item access syntax""" self._setup(table_types) t = table_types.Table([self.a]) # masked or unmasked b = table.Column(name="b", data=[1, 2, 3]) # unmasked t["b"] = b assert np.all(t["b"] == b) def test_set_new_masked_col_existing_table(self, table_types): """Create a new column in an existing table using the item access syntax""" self._setup(table_types) t = table_types.Table([self.a]) # masked or unmasked b = table.MaskedColumn(name="b", data=[1, 2, 3]) # masked t["b"] = b assert np.all(t["b"] == b) def test_set_new_col_existing_table_fail(self, table_types): """Generate failure when creating a new column using the item access syntax""" self._setup(table_types) t = table_types.Table([self.a]) # Wrong size with pytest.raises(ValueError): t["b"] = [1, 2] @pytest.mark.usefixtures("table_types") class TestEmptyData: def test_1(self, table_types): t = table_types.Table() t.add_column(table_types.Column(name="a", dtype=int, length=100)) assert len(t["a"]) == 100 def test_2(self, table_types): t = table_types.Table() t.add_column(table_types.Column(name="a", dtype=int, shape=(3,), length=100)) assert len(t["a"]) == 100 def test_3(self, table_types): t = table_types.Table() # length is not given t.add_column(table_types.Column(name="a", dtype=int)) assert len(t["a"]) == 0 def test_4(self, table_types): t = table_types.Table() # length is not given t.add_column(table_types.Column(name="a", dtype=int, shape=(3, 4))) assert len(t["a"]) == 0 def test_5(self, table_types): t = table_types.Table() 
t.add_column(table_types.Column(name="a")) # dtype is not specified assert len(t["a"]) == 0 def test_scalar(self, table_types): """Test related to #3811 where setting empty tables to scalar values should raise an error instead of having an error raised when accessing the table.""" t = table_types.Table() with pytest.raises( TypeError, match="Empty table cannot have column set to scalar value" ): t.add_column(0) def test_add_via_setitem_and_slice(self, table_types): """Test related to #3023 where a MaskedColumn is created with name=None and then gets changed to name='a'. After PR #2790 this test fails without the #3023 fix.""" t = table_types.Table() t["a"] = table_types.Column([1, 2, 3]) t2 = t[:] assert t2.colnames == t.colnames @pytest.mark.usefixtures("table_types") class TestNewFromColumns: def test_simple(self, table_types): cols = [ table_types.Column(name="a", data=[1, 2, 3]), table_types.Column(name="b", data=[4, 5, 6], dtype=np.float32), ] t = table_types.Table(cols) assert np.all(t["a"].data == np.array([1, 2, 3])) assert np.all(t["b"].data == np.array([4, 5, 6], dtype=np.float32)) assert type(t["b"][1]) is np.float32 def test_from_np_array(self, table_types): cols = [ table_types.Column( name="a", data=np.array([1, 2, 3], dtype=np.int64), dtype=np.float64 ), table_types.Column(name="b", data=np.array([4, 5, 6], dtype=np.float32)), ] t = table_types.Table(cols) assert np.all(t["a"] == np.array([1, 2, 3], dtype=np.float64)) assert np.all(t["b"] == np.array([4, 5, 6], dtype=np.float32)) assert type(t["a"][1]) is np.float64 assert type(t["b"][1]) is np.float32 def test_size_mismatch(self, table_types): cols = [ table_types.Column(name="a", data=[1, 2, 3]), table_types.Column(name="b", data=[4, 5, 6, 7]), ] with pytest.raises(ValueError): table_types.Table(cols) def test_name_none(self, table_types): """Column with name=None can init a table whether or not names are supplied""" c = table_types.Column(data=[1, 2], name="c") d = table_types.Column(data=[3, 4]) t = table_types.Table([c, d], names=(None, "d")) assert t.colnames == ["c", "d"] t = table_types.Table([c, d]) assert t.colnames == ["c", "col1"] @pytest.mark.usefixtures("table_types") class TestReverse: def test_reverse(self, table_types): t = table_types.Table( [ [1, 2, 3], ["a", "b", "cc"], ] ) t.reverse() assert np.all(t["col0"] == np.array([3, 2, 1])) assert np.all(t["col1"] == np.array(["cc", "b", "a"])) t2 = table_types.Table(t, copy=False) assert np.all(t2["col0"] == np.array([3, 2, 1])) assert np.all(t2["col1"] == np.array(["cc", "b", "a"])) t2 = table_types.Table(t, copy=True) assert np.all(t2["col0"] == np.array([3, 2, 1])) assert np.all(t2["col1"] == np.array(["cc", "b", "a"])) t2.sort("col0") assert np.all(t2["col0"] == np.array([1, 2, 3])) assert np.all(t2["col1"] == np.array(["a", "b", "cc"])) def test_reverse_big(self, table_types): x = np.arange(10000) y = x + 1 t = table_types.Table([x, y], names=("x", "y")) t.reverse() assert np.all(t["x"] == x[::-1]) assert np.all(t["y"] == y[::-1]) def test_reverse_mixin(self): """Test reverse for a mixin with no item assignment, fix for #9836""" sc = SkyCoord([1, 2], [3, 4], unit="deg") t = Table([[2, 1], sc], names=["a", "sc"]) t.reverse() assert np.all(t["a"] == [1, 2]) assert np.allclose(t["sc"].ra.to_value("deg"), [2, 1]) @pytest.mark.usefixtures("table_types") class TestRound: def test_round_int(self, table_types): t = table_types.Table( [ ["a", "b", "c"], [1.11, 2.3, 3.0], [1.123456, 2.9876, 3.901], ] ) t.round() assert np.all(t["col0"] == ["a", "b", "c"]) assert 
np.all(t["col1"] == [1.0, 2.0, 3.0]) assert np.all(t["col2"] == [1.0, 3.0, 4.0]) def test_round_dict(self, table_types): t = table_types.Table( [ ["a", "b", "c"], [1.5, 2.5, 3.0111], [1.123456, 2.9876, 3.901], ] ) t.round({"col1": 0, "col2": 3}) assert np.all(t["col0"] == ["a", "b", "c"]) assert np.all(t["col1"] == [2.0, 2.0, 3.0]) assert np.all(t["col2"] == [1.123, 2.988, 3.901]) def test_round_invalid(self, table_types): t = table_types.Table([[1, 2, 3]]) with pytest.raises( ValueError, match="'decimals' argument must be an int or a dict" ): t.round(0.5) def test_round_kind(self, table_types): for typecode in "bBhHiIlLqQpPefdgFDG": # AllInteger, AllFloat arr = np.array([4, 16], dtype=typecode) t = Table([arr]) col0 = t["col0"] t.round(decimals=-1) # Round to nearest 10 assert np.all(t["col0"] == [0, 20]) assert t["col0"] is col0 @pytest.mark.usefixtures("table_types") class TestColumnAccess: def test_1(self, table_types): t = table_types.Table() with pytest.raises(KeyError): t["a"] def test_2(self, table_types): t = table_types.Table() t.add_column(table_types.Column(name="a", data=[1, 2, 3])) assert np.all(t["a"] == np.array([1, 2, 3])) with pytest.raises(KeyError): t["b"] # column does not exist def test_itercols(self, table_types): names = ["a", "b", "c"] t = table_types.Table([[1], [2], [3]], names=names) for name, col in zip(names, t.itercols()): assert name == col.name assert isinstance(col, table_types.Column) @pytest.mark.usefixtures("table_types") class TestAddLength(SetupData): def test_right_length(self, table_types): self._setup(table_types) t = table_types.Table([self.a]) t.add_column(self.b) def test_too_long(self, table_types): self._setup(table_types) t = table_types.Table([self.a]) with pytest.raises(ValueError): t.add_column( table_types.Column(name="b", data=[4, 5, 6, 7]) ) # data too long def test_too_short(self, table_types): self._setup(table_types) t = table_types.Table([self.a]) with pytest.raises(ValueError): t.add_column(table_types.Column(name="b", data=[4, 5])) # data too short @pytest.mark.usefixtures("table_types") class TestAddPosition(SetupData): def test_1(self, table_types): self._setup(table_types) t = table_types.Table() t.add_column(self.a, 0) def test_2(self, table_types): self._setup(table_types) t = table_types.Table() t.add_column(self.a, 1) def test_3(self, table_types): self._setup(table_types) t = table_types.Table() t.add_column(self.a, -1) def test_5(self, table_types): self._setup(table_types) t = table_types.Table() with pytest.raises(ValueError): t.index_column("b") def test_6(self, table_types): self._setup(table_types) t = table_types.Table() t.add_column(self.a) t.add_column(self.b) assert t.colnames == ["a", "b"] def test_7(self, table_types): self._setup(table_types) t = table_types.Table([self.a]) t.add_column(self.b, t.index_column("a")) assert t.colnames == ["b", "a"] def test_8(self, table_types): self._setup(table_types) t = table_types.Table([self.a]) t.add_column(self.b, t.index_column("a") + 1) assert t.colnames == ["a", "b"] def test_9(self, table_types): self._setup(table_types) t = table_types.Table() t.add_column(self.a) t.add_column(self.b, t.index_column("a") + 1) t.add_column(self.c, t.index_column("b")) assert t.colnames == ["a", "c", "b"] def test_10(self, table_types): self._setup(table_types) t = table_types.Table() t.add_column(self.a) ia = t.index_column("a") t.add_column(self.b, ia + 1) t.add_column(self.c, ia) assert t.colnames == ["c", "a", "b"] @pytest.mark.usefixtures("table_types") class 
TestAddName(SetupData): def test_override_name(self, table_types): self._setup(table_types) t = table_types.Table() # Check that we can override the name of the input column in the Table t.add_column(self.a, name="b") t.add_column(self.b, name="a") assert t.colnames == ["b", "a"] # Check that we did not change the name of the input column assert self.a.info.name == "a" assert self.b.info.name == "b" # Now test with an input column from another table t2 = table_types.Table() t2.add_column(t["a"], name="c") assert t2.colnames == ["c"] # Check that we did not change the name of the input column assert t.colnames == ["b", "a"] # Check that we can give a name if none was present col = table_types.Column([1, 2, 3]) t.add_column(col, name="c") assert t.colnames == ["b", "a", "c"] def test_default_name(self, table_types): t = table_types.Table() col = table_types.Column([1, 2, 3]) t.add_column(col) assert t.colnames == ["col0"] @pytest.mark.usefixtures("table_types") class TestInitFromTable(SetupData): def test_from_table_cols(self, table_types): """Ensure that using cols from an existing table gives a clean copy. """ self._setup(table_types) t = self.t cols = t.columns # Construct Table with cols via Table._new_from_cols t2a = table_types.Table([cols["a"], cols["b"], self.c]) # Construct with add_column t2b = table_types.Table() t2b.add_column(cols["a"]) t2b.add_column(cols["b"]) t2b.add_column(self.c) t["a"][1] = 20 t["b"][1] = 21 for t2 in [t2a, t2b]: t2["a"][2] = 10 t2["b"][2] = 11 t2["c"][2] = 12 t2.columns["a"].meta["aa"][3] = 10 assert np.all(t["a"] == np.array([1, 20, 3])) assert np.all(t["b"] == np.array([4, 21, 6])) assert np.all(t2["a"] == np.array([1, 2, 10])) assert np.all(t2["b"] == np.array([4, 5, 11])) assert np.all(t2["c"] == np.array([7, 8, 12])) assert t2["a"].name == "a" assert t2.columns["a"].meta["aa"][3] == 10 assert t.columns["a"].meta["aa"][3] == 3 @pytest.mark.usefixtures("table_types") class TestAddColumns(SetupData): def test_add_columns1(self, table_types): self._setup(table_types) t = table_types.Table() t.add_columns([self.a, self.b, self.c]) assert t.colnames == ["a", "b", "c"] def test_add_columns2(self, table_types): self._setup(table_types) t = table_types.Table([self.a, self.b]) t.add_columns([self.c, self.d]) assert t.colnames == ["a", "b", "c", "d"] assert np.all(t["c"] == np.array([7, 8, 9])) def test_add_columns3(self, table_types): self._setup(table_types) t = table_types.Table([self.a, self.b]) t.add_columns([self.c, self.d], indexes=[1, 0]) assert t.colnames == ["d", "a", "c", "b"] def test_add_columns4(self, table_types): self._setup(table_types) t = table_types.Table([self.a, self.b]) t.add_columns([self.c, self.d], indexes=[0, 0]) assert t.colnames == ["c", "d", "a", "b"] def test_add_columns5(self, table_types): self._setup(table_types) t = table_types.Table([self.a, self.b]) t.add_columns([self.c, self.d], indexes=[2, 2]) assert t.colnames == ["a", "b", "c", "d"] def test_add_columns6(self, table_types): """Check that we can override column names.""" self._setup(table_types) t = table_types.Table() t.add_columns([self.a, self.b, self.c], names=["b", "c", "a"]) assert t.colnames == ["b", "c", "a"] def test_add_columns7(self, table_types): """Check that default names are used when appropriate.""" t = table_types.Table() col0 = table_types.Column([1, 2, 3]) col1 = table_types.Column([4, 5, 3]) t.add_columns([col0, col1]) assert t.colnames == ["col0", "col1"] def test_add_duplicate_column(self, table_types): self._setup(table_types) t = 
table_types.Table() t.add_column(self.a) with pytest.raises(ValueError): t.add_column(table_types.Column(name="a", data=[0, 1, 2])) t.add_column( table_types.Column(name="a", data=[0, 1, 2]), rename_duplicate=True ) t.add_column(self.b) t.add_column(self.c) assert t.colnames == ["a", "a_1", "b", "c"] t.add_column( table_types.Column(name="a", data=[0, 1, 2]), rename_duplicate=True ) assert t.colnames == ["a", "a_1", "b", "c", "a_2"] # test adding column from a separate Table t1 = table_types.Table() t1.add_column(self.a) with pytest.raises(ValueError): t.add_column(t1["a"]) t.add_column(t1["a"], rename_duplicate=True) t1["a"][0] = 100 # Change original column assert t.colnames == ["a", "a_1", "b", "c", "a_2", "a_3"] assert t1.colnames == ["a"] # Check new column didn't change (since name conflict forced a copy) assert t["a_3"][0] == self.a[0] # Check that rename_duplicate=True is ok if there are no duplicates t.add_column( table_types.Column(name="q", data=[0, 1, 2]), rename_duplicate=True ) assert t.colnames == ["a", "a_1", "b", "c", "a_2", "a_3", "q"] def test_add_duplicate_columns(self, table_types): self._setup(table_types) t = table_types.Table([self.a, self.b, self.c]) with pytest.raises(ValueError): t.add_columns( [ table_types.Column(name="a", data=[0, 1, 2]), table_types.Column(name="b", data=[0, 1, 2]), ] ) t.add_columns( [ table_types.Column(name="a", data=[0, 1, 2]), table_types.Column(name="b", data=[0, 1, 2]), ], rename_duplicate=True, ) t.add_column(self.d) assert t.colnames == ["a", "b", "c", "a_1", "b_1", "d"] @pytest.mark.usefixtures("table_types") class TestAddRow(SetupData): @property def b(self): if self._column_type is not None: if not hasattr(self, "_b"): self._b = self._column_type(name="b", data=[4.0, 5.1, 6.2]) return self._b @property def c(self): if self._column_type is not None: if not hasattr(self, "_c"): self._c = self._column_type(name="c", data=["7", "8", "9"]) return self._c @property def d(self): if self._column_type is not None: if not hasattr(self, "_d"): self._d = self._column_type(name="d", data=[[1, 2], [3, 4], [5, 6]]) return self._d @property def t(self): if self._table_type is not None: if not hasattr(self, "_t"): self._t = self._table_type([self.a, self.b, self.c]) return self._t def test_add_none_to_empty_table(self, table_types): self._setup(table_types) t = table_types.Table(names=("a", "b", "c"), dtype=("(2,)i", "S4", "O")) t.add_row() assert np.all(t["a"][0] == [0, 0]) assert t["b"][0] == "" assert t["c"][0] == 0 t.add_row() assert np.all(t["a"][1] == [0, 0]) assert t["b"][1] == "" assert t["c"][1] == 0 def test_add_stuff_to_empty_table(self, table_types): self._setup(table_types) t = table_types.Table(names=("a", "b", "obj"), dtype=("(2,)i", "S8", "O")) t.add_row([[1, 2], "hello", "world"]) assert np.all(t["a"][0] == [1, 2]) assert t["b"][0] == "hello" assert t["obj"][0] == "world" # Make sure it is not repeating last row but instead # adding zeros (as documented) t.add_row() assert np.all(t["a"][1] == [0, 0]) assert t["b"][1] == "" assert t["obj"][1] == 0 def test_add_table_row(self, table_types): self._setup(table_types) t = self.t t["d"] = self.d t2 = table_types.Table([self.a, self.b, self.c, self.d]) t.add_row(t2[0]) assert len(t) == 4 assert np.all(t["a"] == np.array([1, 2, 3, 1])) assert np.allclose(t["b"], np.array([4.0, 5.1, 6.2, 4.0])) assert np.all(t["c"] == np.array(["7", "8", "9", "7"])) assert np.all(t["d"] == np.array([[1, 2], [3, 4], [5, 6], [1, 2]])) def test_add_table_row_obj(self, table_types): self._setup(table_types) t 
= table_types.Table([self.a, self.b, self.obj]) t.add_row([1, 4.0, [10]]) assert len(t) == 4 assert np.all(t["a"] == np.array([1, 2, 3, 1])) assert np.allclose(t["b"], np.array([4.0, 5.1, 6.2, 4.0])) assert np.all(t["obj"] == np.array([1, "string", 3, [10]], dtype="O")) def test_add_qtable_row_multidimensional(self): q = [[1, 2], [3, 4]] * u.m qt = table.QTable([q]) qt.add_row(([5, 6] * u.km,)) assert np.all(qt["col0"] == [[1, 2], [3, 4], [5000, 6000]] * u.m) def test_add_with_tuple(self, table_types): self._setup(table_types) t = self.t t.add_row((4, 7.2, "1")) assert len(t) == 4 assert np.all(t["a"] == np.array([1, 2, 3, 4])) assert np.allclose(t["b"], np.array([4.0, 5.1, 6.2, 7.2])) assert np.all(t["c"] == np.array(["7", "8", "9", "1"])) def test_add_with_list(self, table_types): self._setup(table_types) t = self.t t.add_row([4, 7.2, "10"]) assert len(t) == 4 assert np.all(t["a"] == np.array([1, 2, 3, 4])) assert np.allclose(t["b"], np.array([4.0, 5.1, 6.2, 7.2])) assert np.all(t["c"] == np.array(["7", "8", "9", "10"])) def test_add_with_dict(self, table_types): self._setup(table_types) t = self.t t.add_row({"a": 4, "b": 7.2}) assert len(t) == 4 assert np.all(t["a"] == np.array([1, 2, 3, 4])) assert np.allclose(t["b"], np.array([4.0, 5.1, 6.2, 7.2])) if t.masked: assert np.all(t["c"] == np.array(["7", "8", "9", "7"])) else: assert np.all(t["c"] == np.array(["7", "8", "9", ""])) def test_add_with_none(self, table_types): self._setup(table_types) t = self.t t.add_row() assert len(t) == 4 assert np.all(t["a"].data == np.array([1, 2, 3, 0])) assert np.allclose(t["b"], np.array([4.0, 5.1, 6.2, 0.0])) assert np.all(t["c"].data == np.array(["7", "8", "9", ""])) def test_add_missing_column(self, table_types): self._setup(table_types) t = self.t with pytest.raises(ValueError): t.add_row({"bad_column": 1}) def test_wrong_size_tuple(self, table_types): self._setup(table_types) t = self.t with pytest.raises(ValueError): t.add_row((1, 2)) def test_wrong_vals_type(self, table_types): self._setup(table_types) t = self.t with pytest.raises(TypeError): t.add_row(1) def test_add_row_failures(self, table_types): self._setup(table_types) t = self.t t_copy = table_types.Table(t, copy=True) # Wrong number of columns try: t.add_row([1, 2, 3, 4]) except ValueError: pass assert len(t) == 3 assert np.all(t.as_array() == t_copy.as_array()) # Wrong data type try: t.add_row(["one", 2, 3]) except ValueError: pass assert len(t) == 3 assert np.all(t.as_array() == t_copy.as_array()) def test_insert_table_row(self, table_types): """ Light testing of Table.insert_row() method. The deep testing is done via the add_row() tests which calls insert_row(index=len(self), ...), so here just test that the added index parameter is handled correctly. 
""" self._setup(table_types) row = (10, 40.0, "x", [10, 20]) for index in range(-3, 4): indices = np.insert(np.arange(3), index, 3) t = table_types.Table([self.a, self.b, self.c, self.d]) t2 = t.copy() t.add_row(row) # By now we know this works t2.insert_row(index, row) for name in t.colnames: if t[name].dtype.kind == "f": assert np.allclose(t[name][indices], t2[name]) else: assert np.all(t[name][indices] == t2[name]) for index in (-4, 4): t = table_types.Table([self.a, self.b, self.c, self.d]) with pytest.raises(IndexError): t.insert_row(index, row) @pytest.mark.usefixtures("table_types") class TestTableColumn(SetupData): def test_column_view(self, table_types): self._setup(table_types) t = self.t a = t.columns["a"] a[2] = 10 assert t["a"][2] == 10 @pytest.mark.usefixtures("table_types") class TestArrayColumns(SetupData): def test_1d(self, table_types): self._setup(table_types) b = table_types.Column(name="b", dtype=int, shape=(2,), length=3) t = table_types.Table([self.a]) t.add_column(b) assert t["b"].shape == (3, 2) assert t["b"][0].shape == (2,) def test_2d(self, table_types): self._setup(table_types) b = table_types.Column(name="b", dtype=int, shape=(2, 4), length=3) t = table_types.Table([self.a]) t.add_column(b) assert t["b"].shape == (3, 2, 4) assert t["b"][0].shape == (2, 4) def test_3d(self, table_types): self._setup(table_types) t = table_types.Table([self.a]) b = table_types.Column(name="b", dtype=int, shape=(2, 4, 6), length=3) t.add_column(b) assert t["b"].shape == (3, 2, 4, 6) assert t["b"][0].shape == (2, 4, 6) @pytest.mark.usefixtures("table_types") class TestRemove(SetupData): @property def t(self): if self._table_type is not None: if not hasattr(self, "_t"): self._t = self._table_type([self.a]) return self._t @property def t2(self): if self._table_type is not None: if not hasattr(self, "_t2"): self._t2 = self._table_type([self.a, self.b, self.c]) return self._t2 def test_1(self, table_types): self._setup(table_types) self.t.remove_columns("a") assert self.t.colnames == [] assert self.t.as_array().size == 0 # Regression test for gh-8640 assert not self.t assert isinstance(self.t == None, np.ndarray) assert (self.t == None).size == 0 def test_2(self, table_types): self._setup(table_types) self.t.add_column(self.b) self.t.remove_columns("a") assert self.t.colnames == ["b"] assert self.t.dtype.names == ("b",) assert np.all(self.t["b"] == np.array([4, 5, 6])) def test_3(self, table_types): """Check remove_columns works for a single column with a name of more than one character. 
Regression test against #2699""" self._setup(table_types) self.t["new_column"] = self.t["a"] assert "new_column" in self.t.columns.keys() self.t.remove_columns("new_column") assert "new_column" not in self.t.columns.keys() def test_remove_nonexistent_row(self, table_types): self._setup(table_types) with pytest.raises(IndexError): self.t.remove_row(4) def test_remove_row_0(self, table_types): self._setup(table_types) self.t.add_column(self.b) self.t.add_column(self.c) self.t.remove_row(0) assert self.t.colnames == ["a", "b", "c"] assert np.all(self.t["b"] == np.array([5, 6])) def test_remove_row_1(self, table_types): self._setup(table_types) self.t.add_column(self.b) self.t.add_column(self.c) self.t.remove_row(1) assert self.t.colnames == ["a", "b", "c"] assert np.all(self.t["a"] == np.array([1, 3])) def test_remove_row_2(self, table_types): self._setup(table_types) self.t.add_column(self.b) self.t.add_column(self.c) self.t.remove_row(2) assert self.t.colnames == ["a", "b", "c"] assert np.all(self.t["c"] == np.array([7, 8])) def test_remove_row_slice(self, table_types): self._setup(table_types) self.t.add_column(self.b) self.t.add_column(self.c) self.t.remove_rows(slice(0, 2, 1)) assert self.t.colnames == ["a", "b", "c"] assert np.all(self.t["c"] == np.array([9])) def test_remove_row_list(self, table_types): self._setup(table_types) self.t.add_column(self.b) self.t.add_column(self.c) self.t.remove_rows([0, 2]) assert self.t.colnames == ["a", "b", "c"] assert np.all(self.t["c"] == np.array([8])) def test_remove_row_preserves_meta(self, table_types): self._setup(table_types) self.t.add_column(self.b) self.t.remove_rows([0, 2]) assert self.t["a"].meta == {"aa": [0, 1, 2, 3, 4]} assert self.t.dtype == np.dtype([("a", "int"), ("b", "int")]) def test_delitem_row(self, table_types): self._setup(table_types) self.t.add_column(self.b) self.t.add_column(self.c) del self.t[1] assert self.t.colnames == ["a", "b", "c"] assert np.all(self.t["a"] == np.array([1, 3])) @pytest.mark.parametrize("idx", [[0, 2], np.array([0, 2])]) def test_delitem_row_list(self, table_types, idx): self._setup(table_types) self.t.add_column(self.b) self.t.add_column(self.c) del self.t[idx] assert self.t.colnames == ["a", "b", "c"] assert np.all(self.t["c"] == np.array([8])) def test_delitem_row_slice(self, table_types): self._setup(table_types) self.t.add_column(self.b) self.t.add_column(self.c) del self.t[0:2] assert self.t.colnames == ["a", "b", "c"] assert np.all(self.t["c"] == np.array([9])) def test_delitem_row_fail(self, table_types): self._setup(table_types) with pytest.raises(IndexError): del self.t[4] def test_delitem_row_float(self, table_types): self._setup(table_types) with pytest.raises(IndexError): del self.t[1.0] def test_delitem1(self, table_types): self._setup(table_types) del self.t["a"] assert self.t.colnames == [] assert self.t.as_array().size == 0 # Regression test for gh-8640 assert not self.t assert isinstance(self.t == None, np.ndarray) assert (self.t == None).size == 0 def test_delitem2(self, table_types): self._setup(table_types) del self.t2["b"] assert self.t2.colnames == ["a", "c"] def test_delitems(self, table_types): self._setup(table_types) del self.t2["a", "b"] assert self.t2.colnames == ["c"] def test_delitem_fail(self, table_types): self._setup(table_types) with pytest.raises(KeyError): del self.t["d"] @pytest.mark.usefixtures("table_types") class TestKeep(SetupData): def test_1(self, table_types): self._setup(table_types) t = table_types.Table([self.a, self.b]) t.keep_columns([]) assert 
t.colnames == [] assert t.as_array().size == 0 # Regression test for gh-8640 assert not t assert isinstance(t == None, np.ndarray) assert (t == None).size == 0 def test_2(self, table_types): self._setup(table_types) t = table_types.Table([self.a, self.b]) t.keep_columns("b") assert t.colnames == ["b"] assert t.dtype.names == ("b",) assert np.all(t["b"] == np.array([4, 5, 6])) @pytest.mark.usefixtures("table_types") class TestRename(SetupData): def test_1(self, table_types): self._setup(table_types) t = table_types.Table([self.a]) t.rename_column("a", "b") assert t.colnames == ["b"] assert t.dtype.names == ("b",) assert np.all(t["b"] == np.array([1, 2, 3])) def test_2(self, table_types): self._setup(table_types) t = table_types.Table([self.a, self.b]) t.rename_column("a", "c") t.rename_column("b", "a") assert t.colnames == ["c", "a"] assert t.dtype.names == ("c", "a") if t.masked: assert t.mask.dtype.names == ("c", "a") assert np.all(t["c"] == np.array([1, 2, 3])) assert np.all(t["a"] == np.array([4, 5, 6])) def test_rename_by_attr(self, table_types): self._setup(table_types) t = table_types.Table([self.a, self.b]) t["a"].name = "c" t["b"].name = "a" assert t.colnames == ["c", "a"] assert t.dtype.names == ("c", "a") assert np.all(t["c"] == np.array([1, 2, 3])) assert np.all(t["a"] == np.array([4, 5, 6])) def test_rename_columns(self, table_types): self._setup(table_types) t = table_types.Table([self.a, self.b, self.c]) t.rename_columns(("a", "b", "c"), ("aa", "bb", "cc")) assert t.colnames == ["aa", "bb", "cc"] t.rename_columns(["bb", "cc"], ["b", "c"]) assert t.colnames == ["aa", "b", "c"] with pytest.raises(TypeError): t.rename_columns("aa", ["a"]) with pytest.raises(ValueError): t.rename_columns(["a"], ["b", "c"]) @pytest.mark.usefixtures("table_types") class TestSort: def test_single(self, table_types): t = table_types.Table() t.add_column(table_types.Column(name="a", data=[2, 1, 3])) t.add_column(table_types.Column(name="b", data=[6, 5, 4])) t.add_column( table_types.Column( name="c", data=[ (1, 2), (3, 4), (4, 5), ], ) ) assert np.all(t["a"] == np.array([2, 1, 3])) assert np.all(t["b"] == np.array([6, 5, 4])) t.sort("a") assert np.all(t["a"] == np.array([1, 2, 3])) assert np.all(t["b"] == np.array([5, 6, 4])) assert np.all( t["c"] == np.array( [ [3, 4], [1, 2], [4, 5], ] ) ) t.sort("b") assert np.all(t["a"] == np.array([3, 1, 2])) assert np.all(t["b"] == np.array([4, 5, 6])) assert np.all( t["c"] == np.array( [ [4, 5], [3, 4], [1, 2], ] ) ) @pytest.mark.parametrize("create_index", [False, True]) def test_single_reverse(self, table_types, create_index): t = table_types.Table() t.add_column(table_types.Column(name="a", data=[2, 1, 3])) t.add_column(table_types.Column(name="b", data=[6, 5, 4])) t.add_column(table_types.Column(name="c", data=[(1, 2), (3, 4), (4, 5)])) assert np.all(t["a"] == np.array([2, 1, 3])) assert np.all(t["b"] == np.array([6, 5, 4])) t.sort("a", reverse=True) assert np.all(t["a"] == np.array([3, 2, 1])) assert np.all(t["b"] == np.array([4, 6, 5])) assert np.all(t["c"] == np.array([[4, 5], [1, 2], [3, 4]])) t.sort("b", reverse=True) assert np.all(t["a"] == np.array([2, 1, 3])) assert np.all(t["b"] == np.array([6, 5, 4])) assert np.all(t["c"] == np.array([[1, 2], [3, 4], [4, 5]])) def test_single_big(self, table_types): """Sort a big-ish table with a non-trivial sort order""" x = np.arange(10000) y = np.sin(x) t = table_types.Table([x, y], names=("x", "y")) t.sort("y") idx = np.argsort(y) assert np.all(t["x"] == x[idx]) assert np.all(t["y"] == y[idx]) 
@pytest.mark.parametrize("reverse", [True, False]) def test_empty_reverse(self, table_types, reverse): t = table_types.Table([[], []], dtype=["f4", "U1"]) t.sort("col1", reverse=reverse) def test_multiple(self, table_types): t = table_types.Table() t.add_column(table_types.Column(name="a", data=[2, 1, 3, 2, 3, 1])) t.add_column(table_types.Column(name="b", data=[6, 5, 4, 3, 5, 4])) assert np.all(t["a"] == np.array([2, 1, 3, 2, 3, 1])) assert np.all(t["b"] == np.array([6, 5, 4, 3, 5, 4])) t.sort(["a", "b"]) assert np.all(t["a"] == np.array([1, 1, 2, 2, 3, 3])) assert np.all(t["b"] == np.array([4, 5, 3, 6, 4, 5])) t.sort(["b", "a"]) assert np.all(t["a"] == np.array([2, 1, 3, 1, 3, 2])) assert np.all(t["b"] == np.array([3, 4, 4, 5, 5, 6])) t.sort(("a", "b")) assert np.all(t["a"] == np.array([1, 1, 2, 2, 3, 3])) assert np.all(t["b"] == np.array([4, 5, 3, 6, 4, 5])) def test_multiple_reverse(self, table_types): t = table_types.Table() t.add_column(table_types.Column(name="a", data=[2, 1, 3, 2, 3, 1])) t.add_column(table_types.Column(name="b", data=[6, 5, 4, 3, 5, 4])) assert np.all(t["a"] == np.array([2, 1, 3, 2, 3, 1])) assert np.all(t["b"] == np.array([6, 5, 4, 3, 5, 4])) t.sort(["a", "b"], reverse=True) assert np.all(t["a"] == np.array([3, 3, 2, 2, 1, 1])) assert np.all(t["b"] == np.array([5, 4, 6, 3, 5, 4])) t.sort(["b", "a"], reverse=True) assert np.all(t["a"] == np.array([2, 3, 1, 3, 1, 2])) assert np.all(t["b"] == np.array([6, 5, 5, 4, 4, 3])) t.sort(("a", "b"), reverse=True) assert np.all(t["a"] == np.array([3, 3, 2, 2, 1, 1])) assert np.all(t["b"] == np.array([5, 4, 6, 3, 5, 4])) def test_multiple_with_bytes(self, table_types): t = table_types.Table() t.add_column( table_types.Column(name="firstname", data=[b"Max", b"Jo", b"John"]) ) t.add_column( table_types.Column(name="name", data=[b"Miller", b"Miller", b"Jackson"]) ) t.add_column(table_types.Column(name="tel", data=[12, 15, 19])) t.sort(["name", "firstname"]) assert np.all([t["firstname"] == np.array([b"John", b"Jo", b"Max"])]) assert np.all([t["name"] == np.array([b"Jackson", b"Miller", b"Miller"])]) assert np.all([t["tel"] == np.array([19, 15, 12])]) def test_multiple_with_unicode(self, table_types): # Before Numpy 1.6.2, sorting with multiple column names # failed when a unicode column was present. 
t = table_types.Table() t.add_column( table_types.Column( name="firstname", data=[str(x) for x in ["Max", "Jo", "John"]] ) ) t.add_column( table_types.Column( name="name", data=[str(x) for x in ["Miller", "Miller", "Jackson"]] ) ) t.add_column(table_types.Column(name="tel", data=[12, 15, 19])) t.sort(["name", "firstname"]) assert np.all( [t["firstname"] == np.array([str(x) for x in ["John", "Jo", "Max"]])] ) assert np.all( [t["name"] == np.array([str(x) for x in ["Jackson", "Miller", "Miller"]])] ) assert np.all([t["tel"] == np.array([19, 15, 12])]) def test_argsort(self, table_types): t = table_types.Table() t.add_column(table_types.Column(name="a", data=[2, 1, 3, 2, 3, 1])) t.add_column(table_types.Column(name="b", data=[6, 5, 4, 3, 5, 4])) assert np.all(t.argsort() == t.as_array().argsort()) i0 = t.argsort("a") i1 = t.as_array().argsort(order=["a"]) assert np.all(t["a"][i0] == t["a"][i1]) i0 = t.argsort(["a", "b"]) i1 = t.as_array().argsort(order=["a", "b"]) assert np.all(t["a"][i0] == t["a"][i1]) assert np.all(t["b"][i0] == t["b"][i1]) @pytest.mark.parametrize("add_index", [False, True]) def test_argsort_reverse(self, table_types, add_index): t = table_types.Table() t.add_column(table_types.Column(name="a", data=[2, 1, 3, 2, 3, 1])) t.add_column(table_types.Column(name="b", data=[6, 5, 4, 3, 5, 4])) if add_index: t.add_index("a") assert np.all(t.argsort(reverse=True) == np.array([4, 2, 0, 3, 1, 5])) i0 = t.argsort("a", reverse=True) i1 = np.array([4, 2, 3, 0, 5, 1]) assert np.all(t["a"][i0] == t["a"][i1]) i0 = t.argsort(["a", "b"], reverse=True) i1 = np.array([4, 2, 0, 3, 1, 5]) assert np.all(t["a"][i0] == t["a"][i1]) assert np.all(t["b"][i0] == t["b"][i1]) def test_argsort_bytes(self, table_types): t = table_types.Table() t.add_column( table_types.Column(name="firstname", data=[b"Max", b"Jo", b"John"]) ) t.add_column( table_types.Column(name="name", data=[b"Miller", b"Miller", b"Jackson"]) ) t.add_column(table_types.Column(name="tel", data=[12, 15, 19])) assert np.all(t.argsort(["name", "firstname"]) == np.array([2, 1, 0])) def test_argsort_unicode(self, table_types): # Before Numpy 1.6.2, sorting with multiple column names # failed when a unicode column was present. t = table_types.Table() t.add_column( table_types.Column( name="firstname", data=[str(x) for x in ["Max", "Jo", "John"]] ) ) t.add_column( table_types.Column( name="name", data=[str(x) for x in ["Miller", "Miller", "Jackson"]] ) ) t.add_column(table_types.Column(name="tel", data=[12, 15, 19])) assert np.all(t.argsort(["name", "firstname"]) == np.array([2, 1, 0])) def test_rebuild_column_view_then_rename(self, table_types): """ Issue #2039 where renaming fails after any method that calls _rebuild_table_column_view (this includes sort and add_row). 
""" t = table_types.Table([[1]], names=("a",)) assert t.colnames == ["a"] assert t.dtype.names == ("a",) t.add_row((2,)) assert t.colnames == ["a"] assert t.dtype.names == ("a",) t.rename_column("a", "b") assert t.colnames == ["b"] assert t.dtype.names == ("b",) t.sort("b") assert t.colnames == ["b"] assert t.dtype.names == ("b",) t.rename_column("b", "c") assert t.colnames == ["c"] assert t.dtype.names == ("c",) @pytest.mark.parametrize("kwargs", [{}, {"kind": "stable"}, {"kind": "quicksort"}]) def test_sort_kind(kwargs): t = Table() t["a"] = [2, 1, 3, 2, 3, 1] t["b"] = [6, 5, 4, 3, 5, 4] t_struct = t.as_array() # Since sort calls Table.argsort this covers `kind` for both methods t.sort(["a", "b"], **kwargs) assert np.all(t.as_array() == np.sort(t_struct, **kwargs)) @pytest.mark.usefixtures("table_types") class TestIterator: def test_iterator(self, table_types): d = np.array( [ (2, 1), (3, 6), (4, 5), ], dtype=[("a", "i4"), ("b", "i4")], ) t = table_types.Table(d) if t.masked: with pytest.raises(ValueError): t[0] == d[0] else: for row, np_row in zip(t, d): assert np.all(row == np_row) @pytest.mark.usefixtures("table_types") class TestSetMeta: def test_set_meta(self, table_types): d = table_types.Table(names=("a", "b")) d.meta["a"] = 1 d.meta["b"] = 1 d.meta["c"] = 1 d.meta["d"] = 1 assert list(d.meta.keys()) == ["a", "b", "c", "d"] @pytest.mark.usefixtures("table_types") class TestConvertNumpyArray: def test_convert_numpy_array(self, table_types): d = table_types.Table([[1, 2], [3, 4]], names=("a", "b")) np_data = np.array(d) if table_types.Table is not MaskedTable: assert np.all(np_data == d.as_array()) assert np_data is not d.as_array() assert d.colnames == list(np_data.dtype.names) np_data = np.array(d, copy=False) if table_types.Table is not MaskedTable: assert np.all(np_data == d.as_array()) assert d.colnames == list(np_data.dtype.names) with pytest.raises(ValueError): np_data = np.array(d, dtype=[("c", "i8"), ("d", "i8")]) def test_as_array_byteswap(self, table_types): """Test for https://github.com/astropy/astropy/pull/4080""" byte_orders = (">", "<") native_order = byte_orders[sys.byteorder == "little"] for order in byte_orders: col = table_types.Column([1.0, 2.0], name="a", dtype=order + "f8") t = table_types.Table([col]) arr = t.as_array() assert arr["a"].dtype.byteorder in (native_order, "=") arr = t.as_array(keep_byteorder=True) if order == native_order: assert arr["a"].dtype.byteorder in (order, "=") else: assert arr["a"].dtype.byteorder == order def test_byteswap_fits_array(self, table_types): """ Test for https://github.com/astropy/astropy/pull/4080, demonstrating that FITS tables are converted to native byte order. 
""" non_native_order = (">", "<")[sys.byteorder != "little"] filename = get_pkg_data_filename("data/tb.fits", "astropy.io.fits.tests") t = table_types.Table.read(filename) arr = t.as_array() for idx in range(len(arr.dtype)): assert arr.dtype[idx].byteorder != non_native_order with fits.open(filename, character_as_bytes=True) as hdul: data = hdul[1].data for colname in data.columns.names: assert np.all(data[colname] == arr[colname]) arr2 = t.as_array(keep_byteorder=True) for colname in data.columns.names: assert data[colname].dtype.byteorder == arr2[colname].dtype.byteorder def test_convert_numpy_object_array(self, table_types): d = table_types.Table([[1, 2], [3, 4]], names=("a", "b")) # Single table np_d = np.array(d, dtype=object) assert isinstance(np_d, np.ndarray) assert np_d[()] is d def test_convert_list_numpy_object_array(self, table_types): d = table_types.Table([[1, 2], [3, 4]], names=("a", "b")) ds = [d, d, d] np_ds = np.array(ds, dtype=object) assert all([isinstance(t, table_types.Table) for t in np_ds]) assert all([np.array_equal(t, d) for t in np_ds]) def _assert_copies(t, t2, deep=True): assert t.colnames == t2.colnames np.testing.assert_array_equal(t.as_array(), t2.as_array()) assert t.meta == t2.meta for col, col2 in zip(t.columns.values(), t2.columns.values()): if deep: assert not np.may_share_memory(col, col2) else: assert np.may_share_memory(col, col2) def test_copy(): t = table.Table([[1, 2, 3], [2, 3, 4]], names=["x", "y"]) t2 = t.copy() _assert_copies(t, t2) def test_copy_masked(): t = table.Table( [[1, 2, 3], [2, 3, 4]], names=["x", "y"], masked=True, meta={"name": "test"} ) t["x"].mask == [True, False, True] t2 = t.copy() _assert_copies(t, t2) def test_copy_protocol(): t = table.Table([[1, 2, 3], [2, 3, 4]], names=["x", "y"]) t2 = copy.copy(t) t3 = copy.deepcopy(t) _assert_copies(t, t2, deep=False) _assert_copies(t, t3) def test_disallow_inequality_comparisons(): """ Regression test for #828 - disallow comparison operators on whole Table """ t = table.Table() with pytest.raises(TypeError): t > 2 with pytest.raises(TypeError): t < 1.1 with pytest.raises(TypeError): t >= 5.5 with pytest.raises(TypeError): t <= -1.1 def test_values_equal_part1(): col1 = [1, 2] col2 = [1.0, 2.0] col3 = ["a", "b"] t1 = table.Table([col1, col2, col3], names=["a", "b", "c"]) t2 = table.Table([col1, col2], names=["a", "b"]) t3 = table.table_helpers.simple_table() tm = t1.copy() tm["time"] = Time([1, 2], format="cxcsec") tm1 = tm.copy() tm1["time"][0] = np.ma.masked tq = table.table_helpers.simple_table() tq["quantity"] = [1.0, 2.0, 3.0] * u.m tsk = table.table_helpers.simple_table() tsk["sk"] = SkyCoord(1, 2, unit="deg") eqsk = tsk.values_equal(tsk) for col in eqsk.itercols(): assert np.all(col) with pytest.raises( ValueError, match="cannot compare tables with different column names" ): t2.values_equal(t1) with pytest.raises(ValueError, match="unable to compare column a"): # Shape mismatch t3.values_equal(t1) if NUMPY_LT_1_25: with pytest.raises(ValueError, match="unable to compare column c"): # Type mismatch in column c causes FutureWarning t1.values_equal(2) with pytest.raises(ValueError, match="unable to compare column c"): t1.values_equal([1, 2]) else: eq = t2.values_equal(2) for col in eq.colnames: assert np.all(eq[col] == [False, True]) eq = t2.values_equal([1, 2]) for col in eq.colnames: assert np.all(eq[col] == [True, True]) eq = t2.values_equal(t2) for col in eq.colnames: assert np.all(eq[col] == [True, True]) eq1 = tm1.values_equal(tm) for col in eq1.colnames: assert 
np.all(eq1[col] == [True, True]) eq2 = tq.values_equal(tq) for col in eq2.colnames: assert np.all(eq2[col] == [True, True, True]) eq3 = t2.values_equal(2) for col in eq3.colnames: assert np.all(eq3[col] == [False, True]) eq4 = t2.values_equal([1, 2]) for col in eq4.colnames: assert np.all(eq4[col] == [True, True]) # Compare table to its first row t = table.Table(rows=[(1, "a"), (1, "b")]) eq = t.values_equal(t[0]) assert np.all(eq["col0"] == [True, True]) assert np.all(eq["col1"] == [True, False]) def test_rows_equal(): t = table.Table.read( [ " a b c d", " 2 c 7.0 0", " 2 b 5.0 1", " 2 b 6.0 2", " 2 a 4.0 3", " 0 a 0.0 4", " 1 b 3.0 5", " 1 a 2.0 6", " 1 a 1.0 7", ], format="ascii", ) # All rows are equal assert np.all(t == t) # Assert no rows are different assert not np.any(t != t) # Check equality result for a given row assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool)) # Check inequality result for a given row assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool)) t2 = table.Table.read( [ " a b c d", " 2 c 7.0 0", " 2 b 5.0 1", " 3 b 6.0 2", " 2 a 4.0 3", " 0 a 1.0 4", " 1 b 3.0 5", " 1 c 2.0 6", " 1 a 1.0 7", ], format="ascii", ) # In the above cases, Row.__eq__ gets called, but now need to make sure # Table.__eq__ also gets called. assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool)) assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool)) # Check that comparing to a structured array works assert np.all( (t == t2.as_array()) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool) ) assert np.all( (t.as_array() == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool) ) def test_equality_masked(): t = table.Table.read( [ " a b c d", " 2 c 7.0 0", " 2 b 5.0 1", " 2 b 6.0 2", " 2 a 4.0 3", " 0 a 0.0 4", " 1 b 3.0 5", " 1 a 2.0 6", " 1 a 1.0 7", ], format="ascii", ) # Make into masked table t = table.Table(t, masked=True) # All rows are equal assert np.all(t == t) # Assert no rows are different assert not np.any(t != t) # Check equality result for a given row assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool)) # Check inequality result for a given row assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool)) t2 = table.Table.read( [ " a b c d", " 2 c 7.0 0", " 2 b 5.0 1", " 3 b 6.0 2", " 2 a 4.0 3", " 0 a 1.0 4", " 1 b 3.0 5", " 1 c 2.0 6", " 1 a 1.0 7", ], format="ascii", ) # In the above cases, Row.__eq__ gets called, but now need to make sure # Table.__eq__ also gets called. assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool)) assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool)) # Check that masking a value causes the row to differ t.mask["a"][0] = True assert np.all((t == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool)) assert np.all((t != t2) == np.array([1, 0, 1, 0, 1, 0, 1, 0], dtype=bool)) # Check that comparing to a structured array works assert np.all( (t == t2.as_array()) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool) ) @pytest.mark.xfail def test_equality_masked_bug(): """ This highlights a Numpy bug. Once it works, it can be moved into the test_equality_masked test. 
Related Numpy bug report: https://github.com/numpy/numpy/issues/3840 """ t = table.Table.read( [ " a b c d", " 2 c 7.0 0", " 2 b 5.0 1", " 2 b 6.0 2", " 2 a 4.0 3", " 0 a 0.0 4", " 1 b 3.0 5", " 1 a 2.0 6", " 1 a 1.0 7", ], format="ascii", ) t = table.Table(t, masked=True) t2 = table.Table.read( [ " a b c d", " 2 c 7.0 0", " 2 b 5.0 1", " 3 b 6.0 2", " 2 a 4.0 3", " 0 a 1.0 4", " 1 b 3.0 5", " 1 c 2.0 6", " 1 a 1.0 7", ], format="ascii", ) assert np.all( (t.as_array() == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool) ) # Check that the meta descriptor is working as expected. The MetaBaseTest class # takes care of defining all the tests, and we simply have to define the class # and any minimal set of args to pass. class TestMetaTable(MetaBaseTest): test_class = table.Table args = () def test_unicode_content(): # If we don't have unicode literals then return if isinstance("", bytes): return # Define unicode literals string_a = "астрономическая питона" string_b = "миллиарды световых лет" a = table.Table([[string_a, 2], [string_b, 3]], names=("a", "b")) assert string_a in str(a) # This only works because the coding of this file is utf-8, which # matches the default encoding of Table.__str__ assert string_a.encode("utf-8") in bytes(a) def test_unicode_policy(): t = table.Table.read( [ " a b c d", " 2 c 7.0 0", " 2 b 5.0 1", " 2 b 6.0 2", " 2 a 4.0 3", " 0 a 0.0 4", " 1 b 3.0 5", " 1 a 2.0 6", " 1 a 1.0 7", ], format="ascii", ) assert_follows_unicode_guidelines(t) @pytest.mark.parametrize("uni", ["питона", "ascii"]) def test_unicode_bytestring_conversion(table_types, uni): """ Test converting columns to all unicode or all bytestring. This makes two columns, one which is unicode (str in Py3) and one which is bytes (UTF-8 encoded). There are two code paths in the conversions, a faster one where the data are actually ASCII and a slower one where UTF-8 conversion is required. This tests both via the ``uni`` param. """ byt = uni.encode("utf-8") t = table_types.Table([[byt], [uni], [1]], dtype=("S", "U", "i")) assert t["col0"].dtype.kind == "S" assert t["col1"].dtype.kind == "U" assert t["col2"].dtype.kind == "i" t["col0"].description = "col0" t["col1"].description = "col1" t["col0"].meta["val"] = "val0" t["col1"].meta["val"] = "val1" # Unicode to bytestring t1 = t.copy() t1.convert_unicode_to_bytestring() assert t1["col0"].dtype.kind == "S" assert t1["col1"].dtype.kind == "S" assert t1["col2"].dtype.kind == "i" # Meta made it through assert t1["col0"].description == "col0" assert t1["col1"].description == "col1" assert t1["col0"].meta["val"] == "val0" assert t1["col1"].meta["val"] == "val1" # Need to de-fang the automatic unicode sandwiching of Table assert np.array(t1["col0"])[0] == byt assert np.array(t1["col1"])[0] == byt assert np.array(t1["col2"])[0] == 1 # Bytestring to unicode t1 = t.copy() t1.convert_bytestring_to_unicode() assert t1["col0"].dtype.kind == "U" assert t1["col1"].dtype.kind == "U" assert t1["col2"].dtype.kind == "i" # Meta made it through assert t1["col0"].description == "col0" assert t1["col1"].description == "col1" assert t1["col0"].meta["val"] == "val0" assert t1["col1"].meta["val"] == "val1" # No need to de-fang the automatic unicode sandwiching of Table here, but # do just for consistency to prove things are working. 
assert np.array(t1["col0"])[0] == uni assert np.array(t1["col1"])[0] == uni assert np.array(t1["col2"])[0] == 1 def test_table_deletion(): """ Regression test for the reference cycle discussed in https://github.com/astropy/astropy/issues/2877 """ deleted = set() # A special table subclass which leaves a record when it is finalized class TestTable(table.Table): def __del__(self): deleted.add(id(self)) t = TestTable({"a": [1, 2, 3]}) the_id = id(t) assert t["a"].parent_table is t del t # Cleanup gc.collect() assert the_id in deleted def test_nested_iteration(): """ Regression test for issue 3358 where nested iteration over a single table fails. """ t = table.Table([[0, 1]], names=["a"]) out = [] for r1 in t: for r2 in t: out.append((r1["a"], r2["a"])) assert out == [(0, 0), (0, 1), (1, 0), (1, 1)] def test_table_init_from_degenerate_arrays(table_types): t = table_types.Table(np.array([])) assert len(t.columns) == 0 with pytest.raises(ValueError): t = table_types.Table(np.array(0)) t = table_types.Table(np.array([1, 2, 3])) assert len(t.columns) == 3 @pytest.mark.skipif(not HAS_PANDAS, reason="requires pandas") class TestPandas: def test_simple(self): t = table.Table() for endian in ["<", ">", "="]: for kind in ["f", "i"]: for byte in ["2", "4", "8"]: dtype = np.dtype(endian + kind + byte) x = np.array([1, 2, 3], dtype=dtype) t[endian + kind + byte] = x.newbyteorder(endian) t["u"] = ["a", "b", "c"] t["s"] = ["a", "b", "c"] d = t.to_pandas() for column in t.columns: if column == "u": assert np.all(t["u"] == np.array(["a", "b", "c"])) assert d[column].dtype == np.dtype("O") # upstream feature of pandas elif column == "s": assert np.all(t["s"] == np.array(["a", "b", "c"])) assert d[column].dtype == np.dtype("O") # upstream feature of pandas else: # We should be able to compare exact values here assert np.all(t[column] == d[column]) if t[column].dtype.isnative: assert d[column].dtype == t[column].dtype else: assert d[column].dtype == t[column].byteswap().newbyteorder().dtype # Regression test for astropy/astropy#1156 - the following code gave a # ValueError: Big-endian buffer not supported on little-endian # compiler. We now automatically swap the endian-ness to native order # upon adding the arrays to the data frame. # Explicitly testing little/big/native endian separately - # regression for a case in astropy/astropy#11286 not caught by #3729. 
d[["<i4", ">i4"]] d[["<f4", ">f4"]] t2 = table.Table.from_pandas(d) for column in t.columns: if column in ("u", "s"): assert np.all(t[column] == t2[column]) else: assert_allclose(t[column], t2[column]) if t[column].dtype.isnative: assert t[column].dtype == t2[column].dtype else: assert t[column].byteswap().newbyteorder().dtype == t2[column].dtype @pytest.mark.parametrize("unsigned", ["u", ""]) @pytest.mark.parametrize("bits", [8, 16, 32, 64]) def test_nullable_int(self, unsigned, bits): np_dtype = f"{unsigned}int{bits}" c = MaskedColumn([1, 2], mask=[False, True], dtype=np_dtype) t = Table([c]) df = t.to_pandas() pd_dtype = np_dtype.replace("i", "I").replace("u", "U") assert str(df["col0"].dtype) == pd_dtype t2 = Table.from_pandas(df) assert str(t2["col0"].dtype) == np_dtype assert np.all(t2["col0"].mask == [False, True]) assert np.all(t2["col0"] == c) def test_2d(self): t = table.Table() t["a"] = [1, 2, 3] t["b"] = np.ones((3, 2)) with pytest.raises( ValueError, match="Cannot convert a table with multidimensional columns" ): t.to_pandas() def test_mixin_pandas(self): t = table.QTable() for name in sorted(MIXIN_COLS): if not name.startswith("ndarray"): t[name] = MIXIN_COLS[name] t["dt"] = TimeDelta([0, 2, 4, 6], format="sec") tp = t.to_pandas() t2 = table.Table.from_pandas(tp) assert np.allclose(t2["quantity"], [0, 1, 2, 3]) assert np.allclose(t2["longitude"], [0.0, 1.0, 5.0, 6.0]) assert np.allclose(t2["latitude"], [5.0, 6.0, 10.0, 11.0]) assert np.allclose(t2["skycoord.ra"], [0, 1, 2, 3]) assert np.allclose(t2["skycoord.dec"], [0, 1, 2, 3]) assert np.allclose(t2["arraywrap"], [0, 1, 2, 3]) assert np.allclose(t2["arrayswap"], [0, 1, 2, 3]) assert np.allclose( t2["earthlocation.y"], [0, 110708, 547501, 654527], rtol=0, atol=1 ) # For pandas, Time, TimeDelta are the mixins that round-trip the class assert isinstance(t2["time"], Time) assert np.allclose(t2["time"].jyear, [2000, 2001, 2002, 2003]) assert np.all( t2["time"].isot == [ "2000-01-01T12:00:00.000", "2000-12-31T18:00:00.000", "2002-01-01T00:00:00.000", "2003-01-01T06:00:00.000", ] ) assert t2["time"].format == "isot" # TimeDelta assert isinstance(t2["dt"], TimeDelta) assert np.allclose(t2["dt"].value, [0, 2, 4, 6]) assert t2["dt"].format == "sec" @pytest.mark.parametrize("use_IndexedTable", [False, True]) def test_to_pandas_index(self, use_IndexedTable): """Test to_pandas() with different indexing options. This also tests the fix for #12014. The exception seen there is reproduced here without the fix. 
""" import pandas as pd class IndexedTable(table.QTable): """Always index the first column""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.add_index(self.colnames[0]) row_index = pd.RangeIndex(0, 2, 1) tm_index = pd.DatetimeIndex( ["1998-01-01", "2002-01-01"], dtype="datetime64[ns]", name="tm", freq=None ) tm = Time([1998, 2002], format="jyear") x = [1, 2] table_cls = IndexedTable if use_IndexedTable else table.QTable t = table_cls([tm, x], names=["tm", "x"]) tp = t.to_pandas() if not use_IndexedTable: assert np.all(tp.index == row_index) tp = t.to_pandas(index="tm") assert np.all(tp.index == tm_index) t.add_index("tm") tp = t.to_pandas() assert np.all(tp.index == tm_index) # Make sure writing to pandas didn't hack the original table assert t["tm"].info.indices tp = t.to_pandas(index=True) assert np.all(tp.index == tm_index) tp = t.to_pandas(index=False) assert np.all(tp.index == row_index) with pytest.raises(ValueError) as err: t.to_pandas(index="not a column") assert "index must be None, False" in str(err.value) def test_mixin_pandas_masked(self): tm = Time([1, 2, 3], format="cxcsec") dt = TimeDelta([1, 2, 3], format="sec") tm[1] = np.ma.masked dt[1] = np.ma.masked t = table.QTable([tm, dt], names=["tm", "dt"]) tp = t.to_pandas() assert np.all(tp["tm"].isnull() == [False, True, False]) assert np.all(tp["dt"].isnull() == [False, True, False]) t2 = table.Table.from_pandas(tp) assert np.all(t2["tm"].mask == tm.mask) assert np.ma.allclose(t2["tm"].jd, tm.jd, rtol=1e-14, atol=1e-14) assert np.all(t2["dt"].mask == dt.mask) assert np.ma.allclose(t2["dt"].jd, dt.jd, rtol=1e-14, atol=1e-14) def test_from_pandas_index(self): tm = Time([1998, 2002], format="jyear") x = [1, 2] t = table.Table([tm, x], names=["tm", "x"]) tp = t.to_pandas(index="tm") t2 = table.Table.from_pandas(tp) assert t2.colnames == ["x"] t2 = table.Table.from_pandas(tp, index=True) assert t2.colnames == ["tm", "x"] assert np.allclose(t2["tm"].jyear, tm.jyear) @pytest.mark.parametrize("use_nullable_int", [True, False]) def test_masking(self, use_nullable_int): t = table.Table(masked=True) t["a"] = [1, 2, 3] t["a"].mask = [True, False, True] t["b"] = [1.0, 2.0, 3.0] t["b"].mask = [False, False, True] t["u"] = ["a", "b", "c"] t["u"].mask = [False, True, False] t["s"] = ["a", "b", "c"] t["s"].mask = [False, True, False] # https://github.com/astropy/astropy/issues/7741 t["Source"] = [2584290278794471936, 2584290038276303744, 2584288728310999296] t["Source"].mask = [False, False, False] if use_nullable_int: # Default # No warning with the default use_nullable_int=True d = t.to_pandas(use_nullable_int=use_nullable_int) else: with pytest.warns( TableReplaceWarning, match=r"converted column 'a' from int(32|64) to float64", ): d = t.to_pandas(use_nullable_int=use_nullable_int) t2 = table.Table.from_pandas(d) for name, column in t.columns.items(): assert np.all(column.data == t2[name].data) if hasattr(t2[name], "mask"): assert np.all(column.mask == t2[name].mask) if column.dtype.kind == "i": if np.any(column.mask) and not use_nullable_int: assert t2[name].dtype.kind == "f" else: assert t2[name].dtype.kind == "i" # This warning pops up when use_nullable_int is False # for pandas 1.5.2. 
with np.errstate(invalid="ignore"): assert_array_equal(column.data, t2[name].data.astype(column.dtype)) else: if column.dtype.byteorder in ("=", "|"): assert column.dtype == t2[name].dtype else: assert column.byteswap().newbyteorder().dtype == t2[name].dtype def test_units(self): import pandas as pd import astropy.units as u df = pd.DataFrame({"x": [1, 2, 3], "t": [1.3, 1.2, 1.8]}) t = table.Table.from_pandas(df, units={"x": u.m, "t": u.s}) assert t["x"].unit == u.m assert t["t"].unit == u.s # test error if not a mapping with pytest.raises(TypeError): table.Table.from_pandas(df, units=[u.m, u.s]) # test warning is raised if additional columns in units dict with pytest.warns(UserWarning) as record: table.Table.from_pandas(df, units={"x": u.m, "t": u.s, "y": u.m}) assert len(record) == 1 assert "{'y'}" in record[0].message.args[0] def test_to_pandas_masked_int_data_with__index(self): data = {"data": [0, 1, 2], "index": [10, 11, 12]} t = table.Table(data=data, masked=True) t.add_index("index") t["data"].mask = [1, 1, 0] df = t.to_pandas() assert df["data"].iloc[-1] == 2 @pytest.mark.usefixtures("table_types") class TestReplaceColumn(SetupData): def test_fail_replace_column(self, table_types): """Raise exception when trying to replace column via table.columns object""" self._setup(table_types) t = table_types.Table([self.a, self.b]) with pytest.raises( ValueError, match=r"Cannot replace column 'a'. Use " "Table.replace_column.. instead.", ): t.columns["a"] = [1, 2, 3] with pytest.raises( ValueError, match=r"column name not there is not in the table" ): t.replace_column("not there", [1, 2, 3]) with pytest.raises( ValueError, match=r"length of new column must match table length" ): t.replace_column("a", [1, 2]) def test_replace_column(self, table_types): """Replace existing column with a new column""" self._setup(table_types) t = table_types.Table([self.a, self.b]) ta = t["a"] tb = t["b"] vals = [1.2, 3.4, 5.6] for col in ( vals, table_types.Column(vals), table_types.Column(vals, name="a"), table_types.Column(vals, name="b"), ): t.replace_column("a", col) assert np.all(t["a"] == vals) assert t["a"] is not ta # New a column assert t["b"] is tb # Original b column unchanged assert t.colnames == ["a", "b"] assert t["a"].meta == {} assert t["a"].format is None # Special case: replacing the only column can resize table del t["b"] assert len(t) == 3 t["a"] = [1, 2] assert len(t) == 2 def test_replace_index_column(self, table_types): """Replace index column and generate expected exception""" self._setup(table_types) t = table_types.Table([self.a, self.b]) t.add_index("a") with pytest.raises(ValueError) as err: t.replace_column("a", [1, 2, 3]) assert err.value.args[0] == "cannot replace a table index column" def test_replace_column_no_copy(self): t = Table([[1, 2], [3, 4]], names=["a", "b"]) a = np.array([1.5, 2.5]) t.replace_column("a", a, copy=False) assert t["a"][0] == a[0] t["a"][0] = 10 assert t["a"][0] == a[0] class TestQTableColumnConversionCornerCases: def test_replace_with_masked_col_with_units_in_qtable(self): """This is a small regression from #8902""" t = QTable([[1, 2], [3, 4]], names=["a", "b"]) t["a"] = MaskedColumn([5, 6], unit="m") assert isinstance(t["a"], u.Quantity) def test_do_not_replace_string_column_with_units_in_qtable(self): t = QTable([[1 * u.m]]) with pytest.warns(AstropyUserWarning, match="convert it to Quantity failed"): t["a"] = Column(["a"], unit=u.m) assert isinstance(t["a"], Column) class Test__Astropy_Table__: """ Test initializing a Table subclass from a table-like 
object that implements the __astropy_table__ interface method. """ class SimpleTable: def __init__(self): self.columns = [[1, 2, 3], [4, 5, 6], [7, 8, 9] * u.m] self.names = ["a", "b", "c"] self.meta = OrderedDict([("a", 1), ("b", 2)]) def __astropy_table__(self, cls, copy, **kwargs): a, b, c = self.columns c.info.name = "c" cols = [table.Column(a, name="a"), table.MaskedColumn(b, name="b"), c] names = [col.info.name for col in cols] return cls(cols, names=names, copy=copy, meta=kwargs or self.meta) def test_simple_1(self): """Make a SimpleTable and convert to Table, QTable with copy=False, True""" for table_cls in (table.Table, table.QTable): col_c_class = u.Quantity if table_cls is table.QTable else table.Column for cpy in (False, True): st = self.SimpleTable() # Test putting in a non-native kwarg `extra_meta` to Table initializer t = table_cls(st, copy=cpy, extra_meta="extra!") assert t.colnames == ["a", "b", "c"] assert t.meta == {"extra_meta": "extra!"} assert np.all(t["a"] == st.columns[0]) assert np.all(t["b"] == st.columns[1]) vals = t["c"].value if table_cls is table.QTable else t["c"] assert np.all(st.columns[2].value == vals) assert isinstance(t["a"], table.Column) assert isinstance(t["b"], table.MaskedColumn) assert isinstance(t["c"], col_c_class) assert t["c"].unit is u.m assert type(t) is table_cls # Copy being respected? t["a"][0] = 10 assert st.columns[0][0] == 1 if cpy else 10 def test_simple_2(self): """Test converting a SimpleTable and changing column names and types""" st = self.SimpleTable() dtypes = [np.int32, np.float32, np.float16] names = ["a", "b", "c"] meta = OrderedDict([("c", 3)]) t = table.Table(st, dtype=dtypes, names=names, meta=meta) assert t.colnames == names assert all( col.dtype.type is dtype for col, dtype in zip(t.columns.values(), dtypes) ) # The supplied meta is overrides the existing meta. Changed in astropy 3.2. assert t.meta != st.meta assert t.meta == meta def test_kwargs_exception(self): """If extra kwargs provided but without initializing with a table-like object, exception is raised""" with pytest.raises(TypeError) as err: table.Table([[1]], extra_meta="extra!") assert "__init__() got unexpected keyword argument" in str(err.value) class TestUpdate: def _setup(self): self.a = Column((1, 2, 3), name="a") self.b = Column((4, 5, 6), name="b") self.c = Column((7, 8, 9), name="c") self.d = Column((10, 11, 12), name="d") def test_different_lengths(self): self._setup() t1 = Table([self.a]) t2 = Table([self.b[:-1]]) msg = "Inconsistent data column lengths" with pytest.raises(ValueError, match=msg): t1.update(t2) # If update didn't succeed then t1 and t2 should not have changed. assert t1.colnames == ["a"] assert np.all(t1["a"] == self.a) assert t2.colnames == ["b"] assert np.all(t2["b"] == self.b[:-1]) def test_invalid_inputs(self): # If input is invalid then nothing should be modified. self._setup() t = Table([self.a]) d = {"b": self.b, "c": [0]} msg = "Inconsistent data column lengths: {1, 3}" with pytest.raises(ValueError, match=msg): t.update(d) assert t.colnames == ["a"] assert np.all(t["a"] == self.a) assert d == {"b": self.b, "c": [0]} def test_metadata_conflict(self): self._setup() t1 = Table([self.a], meta={"a": 0, "b": [0], "c": True}) t2 = Table([self.b], meta={"a": 1, "b": [1]}) t2meta = copy.deepcopy(t2.meta) t1.update(t2) assert t1.meta == {"a": 1, "b": [0, 1], "c": True} # t2 metadata should not have changed. 
assert t2.meta == t2meta def test_update(self): self._setup() t1 = Table([self.a, self.b]) t2 = Table([self.b, self.c]) t2["b"] += 1 t1.update(t2) assert t1.colnames == ["a", "b", "c"] assert np.all(t1["a"] == self.a) assert np.all(t1["b"] == self.b + 1) assert np.all(t1["c"] == self.c) # t2 should not have changed. assert t2.colnames == ["b", "c"] assert np.all(t2["b"] == self.b + 1) assert np.all(t2["c"] == self.c) d = {"b": list(self.b), "d": list(self.d)} dc = copy.deepcopy(d) t2.update(d) assert t2.colnames == ["b", "c", "d"] assert np.all(t2["b"] == self.b) assert np.all(t2["c"] == self.c) assert np.all(t2["d"] == self.d) # d should not have changed. assert d == dc # Columns were copied, so changing t2 shouldn't have affected t1. assert t1.colnames == ["a", "b", "c"] assert np.all(t1["a"] == self.a) assert np.all(t1["b"] == self.b + 1) assert np.all(t1["c"] == self.c) def test_update_without_copy(self): self._setup() t1 = Table([self.a, self.b]) t2 = Table([self.b, self.c]) t1.update(t2, copy=False) t2["b"] -= 1 assert t1.colnames == ["a", "b", "c"] assert np.all(t1["a"] == self.a) assert np.all(t1["b"] == self.b - 1) assert np.all(t1["c"] == self.c) d = {"b": np.array(self.b), "d": np.array(self.d)} t2.update(d, copy=False) d["b"] *= 2 assert t2.colnames == ["b", "c", "d"] assert np.all(t2["b"] == 2 * self.b) assert np.all(t2["c"] == self.c) assert np.all(t2["d"] == self.d) def test_table_meta_copy(): """ Test no copy vs light (key) copy vs deep copy of table meta for different situations. #8404. """ t = table.Table([[1]]) meta = {1: [1, 2]} # Assigning meta directly implies using direct object reference t.meta = meta assert t.meta is meta # Table slice implies key copy, so values are unchanged t2 = t[:] assert t2.meta is not t.meta # NOT the same OrderedDict object but equal assert t2.meta == t.meta assert t2.meta[1] is t.meta[1] # Value IS the list same object # Table init with copy=False implies key copy t2 = table.Table(t, copy=False) assert t2.meta is not t.meta # NOT the same OrderedDict object but equal assert t2.meta == t.meta assert t2.meta[1] is t.meta[1] # Value IS the same list object # Table init with copy=True implies deep copy t2 = table.Table(t, copy=True) assert t2.meta is not t.meta # NOT the same OrderedDict object but equal assert t2.meta == t.meta assert t2.meta[1] is not t.meta[1] # Value is NOT the same list object def test_table_meta_copy_with_meta_arg(): """ Test no copy vs light (key) copy vs deep copy of table meta when meta is supplied as a table init argument. #8404. """ meta = {1: [1, 2]} meta2 = {2: [3, 4]} t = table.Table([[1]], meta=meta, copy=False) assert t.meta is meta t = table.Table([[1]], meta=meta) # default copy=True assert t.meta is not meta assert t.meta == meta # Test initializing from existing table with meta with copy=False t2 = table.Table(t, meta=meta2, copy=False) assert t2.meta is meta2 assert t2.meta != t.meta # Change behavior in #8404 # Test initializing from existing table with meta with default copy=True t2 = table.Table(t, meta=meta2) assert t2.meta is not meta2 assert t2.meta != t.meta # Change behavior in #8404 # Table init with copy=True and empty dict meta gets that empty dict t2 = table.Table(t, copy=True, meta={}) assert t2.meta == {} # Table init with copy=True and kwarg meta=None gets the original table dict. # This is a somewhat ambiguous case because it could be interpreted as the # user wanting NO meta set on the output. This could be implemented by inspecting # call args. 
t2 = table.Table(t, copy=True, meta=None) assert t2.meta == t.meta # Test initializing empty table with meta with copy=False t = table.Table(meta=meta, copy=False) assert t.meta is meta assert t.meta[1] is meta[1] # Test initializing empty table with meta with default copy=True (deepcopy meta) t = table.Table(meta=meta) assert t.meta is not meta assert t.meta == meta assert t.meta[1] is not meta[1] def test_replace_column_qtable(): """Replace existing Quantity column with a new column in a QTable""" a = [1, 2, 3] * u.m b = [4, 5, 6] t = table.QTable([a, b], names=["a", "b"]) ta = t["a"] tb = t["b"] ta.info.meta = {"aa": [0, 1, 2, 3, 4]} ta.info.format = "%f" t.replace_column("a", a.to("cm")) assert np.all(t["a"] == ta) assert t["a"] is not ta # New a column assert t["b"] is tb # Original b column unchanged assert t.colnames == ["a", "b"] assert t["a"].info.meta is None assert t["a"].info.format is None def test_replace_update_column_via_setitem(): """ Test table update like ``t['a'] = value``. This leverages off the already well-tested ``replace_column`` and in-place update ``t['a'][:] = value``, so this testing is fairly light. """ a = [1, 2] * u.m b = [3, 4] t = table.QTable([a, b], names=["a", "b"]) assert isinstance(t["a"], u.Quantity) # Inplace update ta = t["a"] t["a"] = 5 * u.m assert np.all(t["a"] == [5, 5] * u.m) assert t["a"] is ta # Replace t["a"] = [5, 6] assert np.all(t["a"] == [5, 6]) assert isinstance(t["a"], table.Column) assert t["a"] is not ta def test_replace_update_column_via_setitem_warnings_normal(): """ Test warnings related to table replace change in #5556: Normal warning-free replace """ t = table.Table([[1, 2, 3], [4, 5, 6]], names=["a", "b"]) with table.conf.set_temp("replace_warnings", ["refcount", "attributes", "slice"]): t["a"] = 0 # in-place update t["a"] = [10, 20, 30] # replace column def test_replace_update_column_via_setitem_warnings_slice(): """ Test warnings related to table replace change in #5556: Replace a slice, one warning. """ t = table.Table([[1, 2, 3], [4, 5, 6]], names=["a", "b"]) with table.conf.set_temp("replace_warnings", ["refcount", "attributes", "slice"]): t2 = t[:2] t2["a"] = 0 # in-place slice update assert np.all(t["a"] == [0, 0, 3]) with pytest.warns( TableReplaceWarning, match="replaced column 'a' which looks like an array slice", ) as w: t2["a"] = [10, 20] # replace slice assert len(w) == 1 def test_replace_update_column_via_setitem_warnings_attributes(): """ Test warnings related to table replace change in #5556: Lost attributes. """ t = table.Table([[1, 2, 3], [4, 5, 6]], names=["a", "b"]) t["a"].unit = "m" with pytest.warns( TableReplaceWarning, match=r"replaced column 'a' " r"and column attributes \['unit'\]", ) as w: with table.conf.set_temp( "replace_warnings", ["refcount", "attributes", "slice"] ): t["a"] = [10, 20, 30] assert len(w) == 1 def test_replace_update_column_via_setitem_warnings_refcount(): """ Test warnings related to table replace change in #5556: Reference count changes. """ t = table.Table([[1, 2, 3], [4, 5, 6]], names=["a", "b"]) ta = t["a"] # Generate an extra reference to original column with pytest.warns( TableReplaceWarning, match="replaced column 'a' and the number of references" ) as w: with table.conf.set_temp( "replace_warnings", ["refcount", "attributes", "slice"] ): t["a"] = [10, 20, 30] assert len(w) == 1 def test_replace_update_column_via_setitem_warnings_always(): """ Test warnings related to table replace change in #5556: Test 'always' setting that raises warning for any replace. 
""" from inspect import currentframe, getframeinfo t = table.Table([[1, 2, 3], [4, 5, 6]], names=["a", "b"]) with table.conf.set_temp("replace_warnings", ["always"]): t["a"] = 0 # in-place slice update with pytest.warns(TableReplaceWarning, match="replaced column 'a'") as w: frameinfo = getframeinfo(currentframe()) t["a"] = [10, 20, 30] # replace column assert len(w) == 1 # Make sure the warning points back to the user code line assert w[0].lineno == frameinfo.lineno + 1 assert "test_table" in w[0].filename def test_replace_update_column_via_setitem_replace_inplace(): """ Test the replace_inplace config option related to #5556. In this case no replace is done. """ t = table.Table([[1, 2, 3], [4, 5, 6]], names=["a", "b"]) ta = t["a"] t["a"].unit = "m" with table.conf.set_temp("replace_inplace", True): with table.conf.set_temp( "replace_warnings", ["always", "refcount", "attributes", "slice"] ): t["a"] = 0 # in-place update assert ta is t["a"] t["a"] = [10, 20, 30] # normally replaces column, but not now assert ta is t["a"] assert np.all(t["a"] == [10, 20, 30]) def test_primary_key_is_inherited(): """Test whether a new Table inherits the primary_key attribute from its parent Table. Issue #4672""" t = table.Table([(2, 3, 2, 1), (8, 7, 6, 5)], names=("a", "b")) t.add_index("a") original_key = t.primary_key # can't test if tuples are equal, so just check content assert original_key[0] == "a" t2 = t[:] t3 = t.copy() t4 = table.Table(t) # test whether the reference is the same in the following assert original_key == t2.primary_key assert original_key == t3.primary_key assert original_key == t4.primary_key # just test one element, assume rest are equal if assert passes assert t.loc[1] == t2.loc[1] assert t.loc[1] == t3.loc[1] assert t.loc[1] == t4.loc[1] def test_qtable_read_for_ipac_table_with_char_columns(): """Test that a char column of a QTable is assigned no unit and not a dimensionless unit, otherwise conversion of reader output to QTable fails.""" t1 = table.QTable([["A"]], names="B") out = StringIO() t1.write(out, format="ascii.ipac") t2 = table.QTable.read(out.getvalue(), format="ascii.ipac", guess=False) assert t2["B"].unit is None def test_create_table_from_final_row(): """Regression test for issue #8422: passing the last row of a table into Table should return a new table containing that row.""" t1 = table.Table([(1, 2)], names=["col"]) row = t1[-1] t2 = table.Table(row)["col"] assert t2[0] == 2 def test_key_values_in_as_array(): # Test for checking column slicing using key_values in Table.as_array() data_rows = [(1, 2.0, "x"), (4, 5.0, "y"), (5, 8.2, "z")] # Creating a table with three columns t1 = table.Table( rows=data_rows, names=("a", "b", "c"), meta={"name": "first table"}, dtype=("i4", "f8", "S1"), ) # Values of sliced column a,b is stored in a numpy array a = np.array([(1, 2.0), (4, 5.0), (5, 8.2)], dtype=[("a", "<i4"), ("b", "<f8")]) # Values for sliced column c is stored in a numpy array b = np.array([(b"x",), (b"y",), (b"z",)], dtype=[("c", "S1")]) # Comparing initialised array with sliced array using Table.as_array() assert np.array_equal(a, t1.as_array(names=["a", "b"])) assert np.array_equal(b, t1.as_array(names=["c"])) def test_tolist(): t = table.Table( [[1, 2, 3], [1.1, 2.2, 3.3], [b"foo", b"bar", b"hello"]], names=("a", "b", "c") ) assert t["a"].tolist() == [1, 2, 3] assert_array_equal(t["b"].tolist(), [1.1, 2.2, 3.3]) assert t["c"].tolist() == ["foo", "bar", "hello"] assert isinstance(t["a"].tolist()[0], int) assert isinstance(t["b"].tolist()[0], float) assert 
isinstance(t["c"].tolist()[0], str) t = table.Table( [[[1, 2], [3, 4]], [[b"foo", b"bar"], [b"hello", b"world"]]], names=("a", "c") ) assert t["a"].tolist() == [[1, 2], [3, 4]] assert t["c"].tolist() == [["foo", "bar"], ["hello", "world"]] assert isinstance(t["a"].tolist()[0][0], int) assert isinstance(t["c"].tolist()[0][0], str) class MyTable(Table): foo = TableAttribute() bar = TableAttribute(default=[]) baz = TableAttribute(default=1) def test_table_attribute(): assert repr(MyTable.baz) == "<TableAttribute name=baz default=1>" t = MyTable([[1, 2]]) # __attributes__ created on the fly on the first access of an attribute # that has a non-None default. assert "__attributes__" not in t.meta assert t.foo is None assert "__attributes__" not in t.meta assert t.baz == 1 assert "__attributes__" in t.meta t.bar.append(2.0) assert t.bar == [2.0] assert t.baz == 1 t.baz = "baz" assert t.baz == "baz" # Table attributes round-trip through pickle tp = pickle.loads(pickle.dumps(t)) assert tp.foo is None assert tp.baz == "baz" assert tp.bar == [2.0] # Allow initialization of attributes in table creation, with / without data for data in None, [[1, 2]]: t2 = MyTable(data, foo=3, bar="bar", baz="baz") assert t2.foo == 3 assert t2.bar == "bar" assert t2.baz == "baz" # Initializing from an existing MyTable works, with and without kwarg attrs t3 = MyTable(t2) assert t3.foo == 3 assert t3.bar == "bar" assert t3.baz == "baz" t3 = MyTable(t2, foo=5, bar="fubar") assert t3.foo == 5 assert t3.bar == "fubar" assert t3.baz == "baz" # Deleting attributes removes it from attributes del t.baz assert "baz" not in t.meta["__attributes__"] del t.bar assert "__attributes__" not in t.meta def test_table_attribute_ecsv(): # Table attribute round-trip through ECSV t = MyTable([[1, 2]], bar=[2.0], baz="baz") out = StringIO() t.write(out, format="ascii.ecsv") t2 = MyTable.read(out.getvalue(), format="ascii.ecsv") assert t2.foo is None assert t2.bar == [2.0] assert t2.baz == "baz" def test_table_attribute_fail(): # Code raises ValueError(f'{attr} not allowed as TableAttribute') but in this # context it gets re-raised as a RuntimeError during class definition. 
with pytest.raises(RuntimeError, match="Error calling __set_name__"): class MyTable2(Table): descriptions = TableAttribute() # Conflicts with init arg with pytest.raises(RuntimeError, match="Error calling __set_name__"): class MyTable3(Table): colnames = TableAttribute() # Conflicts with built-in property def test_set_units_fail(): dat = [[1.0, 2.0], ["aa", "bb"]] with pytest.raises( ValueError, match="sequence of unit values must match number of columns" ): Table(dat, units=[u.m]) with pytest.raises( ValueError, match="invalid column name c for setting unit attribute" ): Table(dat, units={"c": u.m}) def test_set_units(): dat = [[1.0, 2.0], ["aa", "bb"], [3, 4]] exp_units = (u.m, None, None) for cls in Table, QTable: for units in ({"a": u.m, "c": ""}, exp_units): qt = cls(dat, units=units, names=["a", "b", "c"]) if cls is QTable: assert isinstance(qt["a"], u.Quantity) assert isinstance(qt["b"], table.Column) assert isinstance(qt["c"], table.Column) for col, unit in zip(qt.itercols(), exp_units): assert col.info.unit is unit def test_set_descriptions(): dat = [[1.0, 2.0], ["aa", "bb"]] exp_descriptions = ("my description", None) for cls in Table, QTable: for descriptions in ({"a": "my description"}, exp_descriptions): qt = cls(dat, descriptions=descriptions, names=["a", "b"]) for col, description in zip(qt.itercols(), exp_descriptions): assert col.info.description == description def test_set_units_from_row(): text = ["a,b", ",s", "1,2", "3,4"] units = Table.read(text, format="ascii", data_start=1, data_end=2)[0] t = Table.read(text, format="ascii", data_start=2, units=units) assert isinstance(units, table.Row) assert t["a"].info.unit is None assert t["b"].info.unit is u.s def test_set_units_descriptions_read(): """Test setting units and descriptions via Table.read. The test here is less comprehensive because the implementation is exactly the same as for Table.__init__ (calling Table._set_column_attribute)""" for cls in Table, QTable: t = cls.read( ["a b", "1 2"], format="ascii", units=[u.m, u.s], descriptions=["hi", "there"], ) assert t["a"].info.unit is u.m assert t["b"].info.unit is u.s assert t["a"].info.description == "hi" assert t["b"].info.description == "there" def test_broadcasting_8933(): """Explicitly check re-work of code related to broadcasting in #8933""" t = table.Table([[1, 2]]) # Length=2 table t["a"] = [[3, 4]] # Can broadcast if ndim > 1 and shape[0] == 1 t["b"] = 5 t["c"] = [1] # Treat as broadcastable scalar, not length=1 array (which would fail) assert np.all(t["a"] == [[3, 4], [3, 4]]) assert np.all(t["b"] == [5, 5]) assert np.all(t["c"] == [1, 1]) # Test that broadcasted column is writeable t["c"][1] = 10 assert np.all(t["c"] == [1, 10]) def test_custom_masked_column_in_nonmasked_table(): """Test the refactor and change in column upgrades introduced in 95902650f. 
This fixes a regression introduced by #8789 (Change behavior of Table regarding masked columns).""" class MyMaskedColumn(table.MaskedColumn): pass class MySubMaskedColumn(MyMaskedColumn): pass class MyColumn(table.Column): pass class MySubColumn(MyColumn): pass class MyTable(table.Table): Column = MyColumn MaskedColumn = MyMaskedColumn a = table.Column([1]) b = table.MaskedColumn([2], mask=[True]) c = MyMaskedColumn([3], mask=[True]) d = MySubColumn([4]) e = MySubMaskedColumn([5], mask=[True]) # Two different pathways for making table t1 = MyTable([a, b, c, d, e], names=["a", "b", "c", "d", "e"]) t2 = MyTable() t2["a"] = a t2["b"] = b t2["c"] = c t2["d"] = d t2["e"] = e for t in (t1, t2): assert type(t["a"]) is MyColumn assert type(t["b"]) is MyMaskedColumn # upgrade assert type(t["c"]) is MyMaskedColumn assert type(t["d"]) is MySubColumn assert type(t["e"]) is MySubMaskedColumn # sub-class not downgraded def test_sort_with_mutable_skycoord(): """Test sorting a table that has a mutable column such as SkyCoord. In this case the sort is done in-place """ t = Table([[2, 1], SkyCoord([4, 3], [6, 5], unit="deg,deg")], names=["a", "sc"]) meta = {"a": [1, 2]} ta = t["a"] tsc = t["sc"] t["sc"].info.meta = meta t.sort("a") assert np.all(t["a"] == [1, 2]) assert np.allclose(t["sc"].ra.to_value(u.deg), [3, 4]) assert np.allclose(t["sc"].dec.to_value(u.deg), [5, 6]) assert t["a"] is ta assert t["sc"] is tsc # Prior to astropy 4.1 this was a deep copy of SkyCoord column; after 4.1 # it is a reference. t["sc"].info.meta["a"][0] = 100 assert meta["a"][0] == 100 def test_sort_with_non_mutable(): """Test sorting a table that has a non-mutable column.""" t = Table([[2, 1], [3, 4]], names=["a", "b"]) ta = t["a"] tb = t["b"] t["b"].setflags(write=False) meta = {"a": [1, 2]} t["b"].info.meta = meta t.sort("a") assert np.all(t["a"] == [1, 2]) assert np.all(t["b"] == [4, 3]) assert ta is t["a"] assert tb is not t["b"] # Prior to astropy 4.1 this was a deep copy of SkyCoord column; after 4.1 # it is a reference. t["b"].info.meta["a"][0] = 100 assert meta["a"][0] == 1 def test_init_with_list_of_masked_arrays(): """Test the fix for #8977""" m0 = np.ma.array([0, 1, 2], mask=[True, False, True]) m1 = np.ma.array([3, 4, 5], mask=[False, True, False]) mc = [m0, m1] # Test _init_from_list t = table.Table([mc], names=["a"]) # Test add_column t["b"] = [m1, m0] assert t["a"].shape == (2, 3) assert np.all(t["a"][0] == m0) assert np.all(t["a"][1] == m1) assert np.all(t["a"][0].mask == m0.mask) assert np.all(t["a"][1].mask == m1.mask) assert t["b"].shape == (2, 3) assert np.all(t["b"][0] == m1) assert np.all(t["b"][1] == m0) assert np.all(t["b"][0].mask == m1.mask) assert np.all(t["b"][1].mask == m0.mask) def test_data_to_col_convert_strategy(): """Test the update to how data_to_col works (#8972), using the regression example from #8971. 
""" t = table.Table([[0, 1]]) t["a"] = 1 t["b"] = np.int64(2) # Failed previously assert np.all(t["a"] == [1, 1]) assert np.all(t["b"] == [2, 2]) def test_structured_masked_column(): """Test that adding a masked ndarray with a structured dtype works""" dtype = np.dtype([("z", "f8"), ("x", "f8"), ("y", "i4")]) t = Table() t["a"] = np.ma.array( [ (1, 2, 3), (4, 5, 6), ], mask=[ (False, False, True), (False, True, False), ], dtype=dtype, ) assert np.all(t["a"]["z"].mask == [False, False]) assert np.all(t["a"]["x"].mask == [False, True]) assert np.all(t["a"]["y"].mask == [True, False]) assert isinstance(t["a"], MaskedColumn) def test_rows_with_mixins(): """Test for #9165 to allow adding a list of mixin objects. Also test for fix to #9357 where group_by() failed due to mixin object not having info.indices set to []. """ tm = Time([1, 2], format="cxcsec") q = [1, 2] * u.m mixed1 = [1 * u.m, 2] # Mixed input, fails to convert to Quantity mixed2 = [2, 1 * u.m] # Mixed input, not detected as potential mixin rows = [ (1, q[0], tm[0]), (2, q[1], tm[1]), ] t = table.QTable(rows=rows) t["a"] = [q[0], q[1]] t["b"] = [tm[0], tm[1]] t["m1"] = mixed1 t["m2"] = mixed2 assert np.all(t["col1"] == q) assert np.all(t["col2"] == tm) assert np.all(t["a"] == q) assert np.all(t["b"] == tm) assert np.all(t["m1"][ii] == mixed1[ii] for ii in range(2)) assert np.all(t["m2"][ii] == mixed2[ii] for ii in range(2)) assert type(t["m1"]) is table.Column assert t["m1"].dtype is np.dtype(object) assert type(t["m2"]) is table.Column assert t["m2"].dtype is np.dtype(object) # Ensure group_by() runs without failing for sortable columns. # The columns 'm1', and 'm2' are object dtype and not sortable. for name in ["col0", "col1", "col2", "a", "b"]: t.group_by(name) # For good measure include exactly the failure in #9357 in which the # list of Time() objects is in the Table initializer. 
mjds = [Time(58000, format="mjd")] t = Table([mjds, ["gbt"]], names=("mjd", "obs")) t.group_by("obs") def test_iterrows(): dat = [ (1, 2, 3), (4, 5, 6), (7, 8, 6), ] t = table.Table(rows=dat, names=("a", "b", "c")) c_s = [] a_s = [] for c, a in t.iterrows("c", "a"): a_s.append(a) c_s.append(c) assert np.all(t["a"] == a_s) assert np.all(t["c"] == c_s) rows = [row for row in t.iterrows()] assert rows == dat with pytest.raises(ValueError, match="d is not a valid column name"): t.iterrows("d") def test_values_and_types(): dat = [ (1, 2, 3), (4, 5, 6), (7, 8, 6), ] t = table.Table(rows=dat, names=("a", "b", "c")) assert isinstance(t.values(), type(OrderedDict().values())) assert isinstance(t.columns.values(), type(OrderedDict().values())) assert isinstance(t.columns.keys(), type(OrderedDict().keys())) for i in t.values(): assert isinstance(i, table.column.Column) def test_items(): dat = [ (1, 2, 3), (4, 5, 6), (7, 8, 9), ] t = table.Table(rows=dat, names=("a", "b", "c")) assert isinstance(t.items(), type(OrderedDict({}).items())) for i in list(t.items()): assert isinstance(i, tuple) def test_read_write_not_replaceable(): t = table.Table() with pytest.raises(AttributeError): t.read = "fake_read" with pytest.raises(AttributeError): t.write = "fake_write" def test_keep_columns_with_generator(): # Regression test for #12529 t = table.table_helpers.simple_table(1) t.keep_columns(col for col in t.colnames if col == "a") assert t.colnames == ["a"] def test_remove_columns_with_generator(): # Regression test for #12529 t = table.table_helpers.simple_table(1) t.remove_columns(col for col in t.colnames if col == "a") assert t.colnames == ["b", "c"] def test_keep_columns_invalid_names_messages(): t = table.table_helpers.simple_table(1) with pytest.raises(KeyError, match='column "d" does not exist'): t.keep_columns(["c", "d"]) with pytest.raises(KeyError, match="columns {'[de]', '[de]'} do not exist"): t.keep_columns(["c", "d", "e"]) def test_remove_columns_invalid_names_messages(): t = table.table_helpers.simple_table(1) with pytest.raises(KeyError, match='column "d" does not exist'): t.remove_columns(["c", "d"]) with pytest.raises(KeyError, match="columns {'[de]', '[de]'} do not exist"): t.remove_columns(["c", "d", "e"]) @pytest.mark.parametrize("path_type", ["str", "Path"]) def test_read_write_tilde_path(path_type, home_is_tmpdir): if path_type == "str": test_file = os.path.join("~", "test.csv") else: test_file = pathlib.Path("~", "test.csv") t1 = Table() t1["a"] = [1, 2, 3] t1.write(test_file) t2 = Table.read(test_file) assert np.all(t2["a"] == [1, 2, 3]) # Ensure the data wasn't written to the literal tilde-prefixed path assert not os.path.exists(test_file) def test_add_list_order(): t = Table() names = list(map(str, range(20))) array = np.empty((20, 1)) t.add_columns(array, names=names) assert t.colnames == names
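# A minimal supplementary sketch, not part of the original suite; it
# restates the iterrows() contract exercised in test_iterrows above:
# the requested column names select and order the values yielded for
# each row. Table contents are illustrative; this relies on the module's
# existing `table` import.
def test_iterrows_order_sketch():
    t = table.Table(rows=[(1, 2), (3, 4)], names=("x", "y"))
    # Requesting ("y", "x") yields the row values in that order
    assert [vals for vals in t.iterrows("y", "x")] == [(2, 1), (4, 3)]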
20a707b506411263023f4464307b8fc1c82c8854fd40dcc860b0b7aec1834470
# Licensed under a 3-clause BSD style license - see LICENSE.rst import warnings from collections import OrderedDict from copy import deepcopy from io import StringIO import numpy as np import pytest from astropy import coordinates, table, time from astropy import units as u from astropy.table.info import serialize_method_as from astropy.table.table_helpers import simple_table from astropy.utils.data_info import data_info_factory, dtype_info_name def test_table_info_attributes(table_types): """ Test the info() method of printing a summary of table column attributes """ a = np.array([1, 2, 3], dtype="int32") b = np.array([1, 2, 3], dtype="float32") c = np.array(["a", "c", "e"], dtype="|S1") t = table_types.Table([a, b, c], names=["a", "b", "c"]) # Minimal output for a typical table tinfo = t.info(out=None) subcls = ["class"] if table_types.Table.__name__ == "MyTable" else [] assert tinfo.colnames == [ "name", "dtype", "shape", "unit", "format", "description", "class", "n_bad", "length", ] assert np.all(tinfo["name"] == ["a", "b", "c"]) assert np.all(tinfo["dtype"] == ["int32", "float32", dtype_info_name("S1")]) if subcls: assert np.all(tinfo["class"] == ["MyColumn"] * 3) # All output fields including a mixin column t["d"] = [1, 2, 3] * u.m t["d"].description = "quantity" t["a"].format = "%02d" t["e"] = time.Time([1, 2, 3], format="mjd") t["e"].info.description = "time" t["f"] = coordinates.SkyCoord([1, 2, 3], [1, 2, 3], unit="deg") t["f"].info.description = "skycoord" tinfo = t.info(out=None) assert np.all(tinfo["name"] == "a b c d e f".split()) assert np.all( tinfo["dtype"] == ["int32", "float32", dtype_info_name("S1"), "float64", "object", "object"] ) assert np.all(tinfo["unit"] == ["", "", "", "m", "", "deg,deg"]) assert np.all(tinfo["format"] == ["%02d", "", "", "", "", ""]) assert np.all(tinfo["description"] == ["", "", "", "quantity", "time", "skycoord"]) cls = t.ColumnClass.__name__ assert np.all(tinfo["class"] == [cls, cls, cls, cls, "Time", "SkyCoord"]) # Test that repr(t.info) is same as t.info() out = StringIO() t.info(out=out) assert repr(t.info) == out.getvalue() def test_table_info_stats(table_types): """ Test the info() method of printing a summary of table column statistics """ a = np.array([1, 2, 1, 2], dtype="int32") b = np.array([1, 2, 1, 2], dtype="float32") c = np.array(["a", "c", "e", "f"], dtype="|S1") d = time.Time([1, 2, 1, 2], format="mjd", scale="tai") t = table_types.Table([a, b, c, d], names=["a", "b", "c", "d"]) # option = 'stats' masked = "masked=True " if t.masked else "" out = StringIO() t.info("stats", out=out) table_header_line = f"<{t.__class__.__name__} {masked}length=4>" exp = [ table_header_line, "name mean std min max", "---- ---- --- --- ---", " a 1.5 0.5 1 2", " b 1.5 0.5 1 2", " c -- -- -- --", " d 1.5 -- 1.0 2.0", ] assert out.getvalue().splitlines() == exp # option = ['attributes', 'stats'] tinfo = t.info(["attributes", "stats"], out=None) assert tinfo.colnames == [ "name", "dtype", "shape", "unit", "format", "description", "class", "mean", "std", "min", "max", "n_bad", "length", ] assert np.all(tinfo["mean"] == ["1.5", "1.5", "--", "1.5"]) assert np.all(tinfo["std"] == ["0.5", "0.5", "--", "--"]) assert np.all(tinfo["min"] == ["1", "1", "--", "1.0"]) assert np.all(tinfo["max"] == ["2", "2", "--", "2.0"]) out = StringIO() t.info("stats", out=out) exp = [ table_header_line, "name mean std min max", "---- ---- --- --- ---", " a 1.5 0.5 1 2", " b 1.5 0.5 1 2", " c -- -- -- --", " d 1.5 -- 1.0 2.0", ] assert out.getvalue().splitlines() == exp # option 
= ['attributes', custom] custom = data_info_factory( names=["sum", "first"], funcs=[np.sum, lambda col: col[0]] ) out = StringIO() tinfo = t.info(["attributes", custom], out=None) assert tinfo.colnames == [ "name", "dtype", "shape", "unit", "format", "description", "class", "sum", "first", "n_bad", "length", ] assert np.all(tinfo["name"] == ["a", "b", "c", "d"]) assert np.all( tinfo["dtype"] == ["int32", "float32", dtype_info_name("S1"), "object"] ) assert np.all(tinfo["sum"] == ["6", "6", "--", "--"]) assert np.all(tinfo["first"] == ["1", "1", "a", "1.0"]) def test_data_info(): """ Test getting info for just a column. """ cols = [ table.Column( [1.0, 2.0, np.nan], name="name", description="description", unit="m/s" ), table.MaskedColumn( [1.0, 2.0, 3.0], name="name", description="description", unit="m/s", mask=[False, False, True], ), ] for c in cols: # Test getting the full ordered dict cinfo = c.info(out=None) assert cinfo == OrderedDict( [ ("name", "name"), ("dtype", "float64"), ("shape", ""), ("unit", "m / s"), ("format", ""), ("description", "description"), ("class", type(c).__name__), ("n_bad", 1), ("length", 3), ] ) # Test the console (string) version which omits trivial values out = StringIO() c.info(out=out) exp = [ "name = name", "dtype = float64", "unit = m / s", "description = description", f"class = {type(c).__name__}", "n_bad = 1", "length = 3", ] assert out.getvalue().splitlines() == exp # repr(c.info) gives the same as c.info() assert repr(c.info) == out.getvalue() # Test stats info cinfo = c.info("stats", out=None) assert cinfo == OrderedDict( [ ("name", "name"), ("mean", "1.5"), ("std", "0.5"), ("min", "1"), ("max", "2"), ("n_bad", 1), ("length", 3), ] ) def test_data_info_subclass(): class Column(table.Column): """ Confusingly named Column on purpose, but that is legal. """ pass for data in ([], [1, 2]): c = Column(data, dtype="int64") cinfo = c.info(out=None) assert cinfo == OrderedDict( [ ("dtype", "int64"), ("shape", ""), ("unit", ""), ("format", ""), ("description", ""), ("class", "Column"), ("n_bad", 0), ("length", len(data)), ] ) def test_scalar_info(): """ Make sure info works with scalar values """ c = time.Time("2000:001") cinfo = c.info(out=None) assert cinfo["n_bad"] == 0 assert "length" not in cinfo def test_empty_table(): t = table.Table() out = StringIO() t.info(out=out) exp = ["<Table length=0>", "<No columns>"] assert out.getvalue().splitlines() == exp def test_class_attribute(): """ Test that class info column is suppressed only for identical non-mixin columns. """ vals = [[1] * u.m, [2] * u.m] texp = [ "<Table length=1>", "name dtype unit", "---- ------- ----", "col0 float64 m", "col1 float64 m", ] qexp = [ "<QTable length=1>", "name dtype unit class ", "---- ------- ---- --------", "col0 float64 m Quantity", "col1 float64 m Quantity", ] for table_cls, exp in ((table.Table, texp), (table.QTable, qexp)): t = table_cls(vals) out = StringIO() t.info(out=out) assert out.getvalue().splitlines() == exp def test_ignore_warnings(): t = table.Table([[np.nan, np.nan]]) with warnings.catch_warnings(record=True) as warns: t.info("stats", out=None) assert len(warns) == 0 def test_no_deprecation_warning(): # regression test for #5459, where numpy deprecation warnings were # emitted unnecessarily. 
t = simple_table() with warnings.catch_warnings(record=True) as warns: t.info() assert len(warns) == 0 def test_lost_parent_error(): c = table.Column([1, 2, 3], name="a") with pytest.raises(AttributeError, match='failed to access "info" attribute'): c[:].info.name def test_info_serialize_method(): """ Unit test of context manager to set info.serialize_method. Normally just used to set this for writing a Table to file (FITS, ECSV, HDF5). """ t = table.Table( { "tm": time.Time([1, 2], format="cxcsec"), "sc": coordinates.SkyCoord([1, 2], [1, 2], unit="deg"), "mc": table.MaskedColumn([1, 2], mask=[True, False]), "mc2": table.MaskedColumn([1, 2], mask=[True, False]), } ) origs = {} for name in ("tm", "mc", "mc2"): origs[name] = deepcopy(t[name].info.serialize_method) # Test setting by name and getting back to originals with serialize_method_as(t, {"tm": "test_tm", "mc": "test_mc"}): for name in ("tm", "mc"): assert all( t[name].info.serialize_method[key] == "test_" + name for key in t[name].info.serialize_method ) assert t["mc2"].info.serialize_method == origs["mc2"] assert not hasattr(t["sc"].info, "serialize_method") for name in ("tm", "mc", "mc2"): assert t[name].info.serialize_method == origs[name] # dict compare assert not hasattr(t["sc"].info, "serialize_method") # Test setting by name and class, where name takes precedence. Also # test that it works for subclasses. with serialize_method_as( t, {"tm": "test_tm", "mc": "test_mc", table.Column: "test_mc2"} ): for name in ("tm", "mc", "mc2"): assert all( t[name].info.serialize_method[key] == "test_" + name for key in t[name].info.serialize_method ) assert not hasattr(t["sc"].info, "serialize_method") for name in ("tm", "mc", "mc2"): assert t[name].info.serialize_method == origs[name] # dict compare assert not hasattr(t["sc"].info, "serialize_method") # Test supplying a single string that all applies to all columns with # a serialize_method. with serialize_method_as(t, "test"): for name in ("tm", "mc", "mc2"): assert all( t[name].info.serialize_method[key] == "test" for key in t[name].info.serialize_method ) assert not hasattr(t["sc"].info, "serialize_method") for name in ("tm", "mc", "mc2"): assert t[name].info.serialize_method == origs[name] # dict compare assert not hasattr(t["sc"].info, "serialize_method") def test_info_serialize_method_exception(): """ Unit test of context manager to set info.serialize_method. Normally just used to set this for writing a Table to file (FITS, ECSV, HDF5). """ t = simple_table(masked=True) origs = deepcopy(t["a"].info.serialize_method) try: with serialize_method_as(t, "test"): assert all( t["a"].info.serialize_method[key] == "test" for key in t["a"].info.serialize_method ) raise ZeroDivisionError() except ZeroDivisionError: pass assert t["a"].info.serialize_method == origs # dict compare
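# A minimal supplementary sketch, not part of the original suite,
# mirroring the data_info_factory usage in test_table_info_stats above:
# the factory builds a custom info option from parallel lists of summary
# names and functions. The 'median' option is illustrative; this relies
# on the module's existing imports (np, table, data_info_factory).
def test_data_info_factory_sketch():
    median_option = data_info_factory(names=["median"], funcs=[np.median])
    t = table.Table([[1.0, 3.0, 5.0]], names=["m"])
    tinfo = t.info(["attributes", median_option], out=None)
    # Summary values are reported as strings, as in the tests above
    assert float(tinfo["median"][0]) == 3.0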
61e46d3811e887baddd71e1e778b3f60345be11fddc5e04b5c5d42cb4a5b93ff
# Licensed under a 3-clause BSD style license - see LICENSE.rst import warnings import numpy as np import pytest from astropy import units as u from astropy.table import Column, QTable, Row, Table, hstack from astropy.table.bst import BST from astropy.table.column import BaseColumn from astropy.table.index import SlicedIndex, get_index from astropy.table.soco import SCEngine from astropy.table.sorted_array import SortedArray from astropy.time import Time from astropy.utils.compat.optional_deps import HAS_SORTEDCONTAINERS from .test_table import SetupData available_engines = [BST, SortedArray] if HAS_SORTEDCONTAINERS: available_engines.append(SCEngine) @pytest.fixture(params=available_engines) def engine(request): return request.param _col = [1, 2, 3, 4, 5] @pytest.fixture( params=[ _col, u.Quantity(_col), Time(_col, format="jyear"), ] ) def main_col(request): return request.param def assert_col_equal(col, array): if isinstance(col, Time): assert np.all(col == Time(array, format="jyear")) else: assert np.all(col == col.__class__(array)) @pytest.mark.usefixtures("table_types") class TestIndex(SetupData): def _setup(self, main_col, table_types): super()._setup(table_types) self.main_col = main_col if isinstance(main_col, u.Quantity): self._table_type = QTable if not isinstance(main_col, list): self._column_type = lambda x: x # don't change mixin type self.mutable = isinstance(main_col, (list, u.Quantity)) def make_col(self, name, lst): return self._column_type(lst, name=name) def make_val(self, val): if isinstance(self.main_col, Time): return Time(val, format="jyear") return val @property def t(self): if not hasattr(self, "_t"): # Note that order of columns is important, and the 'a' column is # last to ensure that the index column does not need to be the first # column (as was discovered in #10025). Most testing uses 'a' and # ('a', 'b') for the columns. 
self._t = self._table_type() self._t["b"] = self._column_type([4.0, 5.1, 6.2, 7.0, 1.1]) self._t["c"] = self._column_type(["7", "8", "9", "10", "11"]) self._t["a"] = self._column_type(self.main_col) return self._t @pytest.mark.parametrize("composite", [False, True]) def test_table_index(self, main_col, table_types, composite, engine): self._setup(main_col, table_types) t = self.t t.add_index(("a", "b") if composite else "a", engine=engine) assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4]) if not self.mutable: return # test altering table columns t["a"][0] = 4 t.add_row((6.0, "7", 6)) t["a"][3] = 10 t.remove_row(2) t.add_row((5.0, "9", 4)) assert_col_equal(t["a"], np.array([4, 2, 10, 5, 6, 4])) assert np.allclose(t["b"], np.array([4.0, 5.1, 7.0, 1.1, 6.0, 5.0])) assert np.all(t["c"].data == np.array(["7", "8", "10", "11", "7", "9"])) index = t.indices[0] ll = list(index.data.items()) if composite: assert np.all( ll == [ ((2, 5.1), [1]), ((4, 4.0), [0]), ((4, 5.0), [5]), ((5, 1.1), [3]), ((6, 6.0), [4]), ((10, 7.0), [2]), ] ) else: assert np.all( ll == [((2,), [1]), ((4,), [0, 5]), ((5,), [3]), ((6,), [4]), ((10,), [2])] ) t.remove_indices("a") assert len(t.indices) == 0 def test_table_slicing(self, main_col, table_types, engine): self._setup(main_col, table_types) t = self.t t.add_index("a", engine=engine) assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4]) for slice_ in ([0, 2], np.array([0, 2])): t2 = t[slice_] # t2 should retain an index on column 'a' assert len(t2.indices) == 1 assert_col_equal(t2["a"], [1, 3]) # the index in t2 should reorder row numbers after slicing assert np.all(t2.indices[0].sorted_data() == [0, 1]) # however, this index should be a deep copy of t1's index assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4]) def test_remove_rows(self, main_col, table_types, engine): self._setup(main_col, table_types) if not self.mutable: return t = self.t t.add_index("a", engine=engine) # remove individual row t2 = t.copy() t2.remove_rows(2) assert_col_equal(t2["a"], [1, 2, 4, 5]) assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3]) # remove by list, ndarray, or slice for cut in ([0, 2, 4], np.array([0, 2, 4]), slice(0, 5, 2)): t2 = t.copy() t2.remove_rows(cut) assert_col_equal(t2["a"], [2, 4]) assert np.all(t2.indices[0].sorted_data() == [0, 1]) with pytest.raises(ValueError): t.remove_rows((0, 2, 4)) def test_col_get_slice(self, main_col, table_types, engine): self._setup(main_col, table_types) t = self.t t.add_index("a", engine=engine) # get slice t2 = t[1:3] # table slice assert_col_equal(t2["a"], [2, 3]) assert np.all(t2.indices[0].sorted_data() == [0, 1]) col_slice = t["a"][1:3] assert_col_equal(col_slice, [2, 3]) # true column slices discard indices if isinstance(t["a"], BaseColumn): assert len(col_slice.info.indices) == 0 # take slice of slice t2 = t[::2] assert_col_equal(t2["a"], np.array([1, 3, 5])) t3 = t2[::-1] assert_col_equal(t3["a"], np.array([5, 3, 1])) assert np.all(t3.indices[0].sorted_data() == [2, 1, 0]) t3 = t2[:2] assert_col_equal(t3["a"], np.array([1, 3])) assert np.all(t3.indices[0].sorted_data() == [0, 1]) # out-of-bound slices for t_empty in (t2[3:], t2[2:1], t3[2:]): assert len(t_empty["a"]) == 0 assert np.all(t_empty.indices[0].sorted_data() == []) if self.mutable: # get boolean mask mask = t["a"] % 2 == 1 t2 = t[mask] assert_col_equal(t2["a"], [1, 3, 5]) assert np.all(t2.indices[0].sorted_data() == [0, 1, 2]) def test_col_set_slice(self, main_col, table_types, engine): self._setup(main_col, table_types) if not 
self.mutable: return t = self.t t.add_index("a", engine=engine) # set slice t2 = t.copy() t2["a"][1:3] = np.array([6, 7]) assert_col_equal(t2["a"], np.array([1, 6, 7, 4, 5])) assert np.all(t2.indices[0].sorted_data() == [0, 3, 4, 1, 2]) # change original table via slice reference t2 = t.copy() t3 = t2[1:3] assert_col_equal(t3["a"], np.array([2, 3])) assert np.all(t3.indices[0].sorted_data() == [0, 1]) t3["a"][0] = 5 assert_col_equal(t3["a"], np.array([5, 3])) assert_col_equal(t2["a"], np.array([1, 5, 3, 4, 5])) assert np.all(t3.indices[0].sorted_data() == [1, 0]) assert np.all(t2.indices[0].sorted_data() == [0, 2, 3, 1, 4]) # set boolean mask t2 = t.copy() mask = t["a"] % 2 == 1 t2["a"][mask] = 0.0 assert_col_equal(t2["a"], [0, 2, 0, 4, 0]) assert np.all(t2.indices[0].sorted_data() == [0, 2, 4, 1, 3]) def test_multiple_slices(self, main_col, table_types, engine): self._setup(main_col, table_types) if not self.mutable: return t = self.t t.add_index("a", engine=engine) for i in range(6, 51): t.add_row((1.0, "A", i)) assert_col_equal(t["a"], [i for i in range(1, 51)]) assert np.all(t.indices[0].sorted_data() == [i for i in range(50)]) evens = t[::2] assert np.all(evens.indices[0].sorted_data() == [i for i in range(25)]) reverse = evens[::-1] index = reverse.indices[0] assert (index.start, index.stop, index.step) == (48, -2, -2) assert np.all(index.sorted_data() == [i for i in range(24, -1, -1)]) # modify slice of slice reverse[-10:] = 0 expected = np.array([i for i in range(1, 51)]) expected[:20][expected[:20] % 2 == 1] = 0 assert_col_equal(t["a"], expected) assert_col_equal(evens["a"], expected[::2]) assert_col_equal(reverse["a"], expected[::2][::-1]) # first ten evens are now zero assert np.all( t.indices[0].sorted_data() == ( [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19] + [i for i in range(20, 50)] ) ) assert np.all(evens.indices[0].sorted_data() == [i for i in range(25)]) assert np.all( reverse.indices[0].sorted_data() == [i for i in range(24, -1, -1)] ) # try different step sizes of slice t2 = t[1:20:2] assert_col_equal(t2["a"], [2, 4, 6, 8, 10, 12, 14, 16, 18, 20]) assert np.all(t2.indices[0].sorted_data() == [i for i in range(10)]) t3 = t2[::3] assert_col_equal(t3["a"], [2, 8, 14, 20]) assert np.all(t3.indices[0].sorted_data() == [0, 1, 2, 3]) t4 = t3[2::-1] assert_col_equal(t4["a"], [14, 8, 2]) assert np.all(t4.indices[0].sorted_data() == [2, 1, 0]) def test_sort(self, main_col, table_types, engine): self._setup(main_col, table_types) t = self.t[::-1] # reverse table assert_col_equal(t["a"], [5, 4, 3, 2, 1]) t.add_index("a", engine=engine) assert np.all(t.indices[0].sorted_data() == [4, 3, 2, 1, 0]) if not self.mutable: return # sort table by column a t2 = t.copy() t2.sort("a") assert_col_equal(t2["a"], [1, 2, 3, 4, 5]) assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3, 4]) # sort table by primary key t2 = t.copy() t2.sort() assert_col_equal(t2["a"], [1, 2, 3, 4, 5]) assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3, 4]) def test_insert_row(self, main_col, table_types, engine): self._setup(main_col, table_types) if not self.mutable: return t = self.t t.add_index("a", engine=engine) t.insert_row(2, (1.0, "12", 6)) assert_col_equal(t["a"], [1, 2, 6, 3, 4, 5]) assert np.all(t.indices[0].sorted_data() == [0, 1, 3, 4, 5, 2]) t.insert_row(1, (4.0, "13", 0)) assert_col_equal(t["a"], [1, 0, 2, 6, 3, 4, 5]) assert np.all(t.indices[0].sorted_data() == [1, 0, 2, 4, 5, 6, 3]) def test_index_modes(self, main_col, table_types, engine): self._setup(main_col, 
table_types) t = self.t t.add_index("a", engine=engine) # first, no special mode assert len(t[[1, 3]].indices) == 1 assert len(t[::-1].indices) == 1 assert len(self._table_type(t).indices) == 1 assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4]) t2 = t.copy() # non-copy mode with t.index_mode("discard_on_copy"): assert len(t[[1, 3]].indices) == 0 assert len(t[::-1].indices) == 0 assert len(self._table_type(t).indices) == 0 assert len(t2.copy().indices) == 1 # mode should only affect t # make sure non-copy mode is exited correctly assert len(t[[1, 3]].indices) == 1 if not self.mutable: return # non-modify mode with t.index_mode("freeze"): assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4]) t["a"][0] = 6 assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4]) t.add_row((1.5, "12", 2)) assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4]) t.remove_rows([1, 3]) assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4]) assert_col_equal(t["a"], [6, 3, 5, 2]) # mode should only affect t assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3, 4]) t2["a"][0] = 6 assert np.all(t2.indices[0].sorted_data() == [1, 2, 3, 4, 0]) # make sure non-modify mode is exited correctly assert np.all(t.indices[0].sorted_data() == [3, 1, 2, 0]) if isinstance(t["a"], BaseColumn): assert len(t["a"][::-1].info.indices) == 0 with t.index_mode("copy_on_getitem"): assert len(t["a"][[1, 2]].info.indices) == 1 # mode should only affect t assert len(t2["a"][[1, 2]].info.indices) == 0 assert len(t["a"][::-1].info.indices) == 0 assert len(t2["a"][::-1].info.indices) == 0 def test_index_retrieval(self, main_col, table_types, engine): self._setup(main_col, table_types) t = self.t t.add_index("a", engine=engine) t.add_index(["a", "c"], engine=engine) assert len(t.indices) == 2 assert len(t.indices["a"].columns) == 1 assert len(t.indices["a", "c"].columns) == 2 with pytest.raises(IndexError): t.indices["b"] def test_col_rename(self, main_col, table_types, engine): """ Checks for a previous bug in which copying a Table with different column names raised an exception. 
""" self._setup(main_col, table_types) t = self.t t.add_index("a", engine=engine) t2 = self._table_type(self.t, names=["d", "e", "f"]) assert len(t2.indices) == 1 def test_table_loc(self, main_col, table_types, engine): self._setup(main_col, table_types) t = self.t t.add_index("a", engine=engine) t.add_index("b", engine=engine) t2 = t.loc[self.make_val(3)] # single label, with primary key 'a' assert_col_equal(t2["a"], [3]) assert isinstance(t2, Row) # list search t2 = t.loc[[self.make_val(1), self.make_val(4), self.make_val(2)]] assert_col_equal(t2["a"], [1, 4, 2]) # same order as input list if not isinstance(main_col, Time): # ndarray search t2 = t.loc[np.array([1, 4, 2])] assert_col_equal(t2["a"], [1, 4, 2]) assert_col_equal(t2["a"], [1, 4, 2]) t2 = t.loc[self.make_val(3) : self.make_val(5)] # range search assert_col_equal(t2["a"], [3, 4, 5]) t2 = t.loc["b", 5.0:7.0] assert_col_equal(t2["b"], [5.1, 6.2, 7.0]) # search by sorted index t2 = t.iloc[0:2] # two smallest rows by column 'a' assert_col_equal(t2["a"], [1, 2]) t2 = t.iloc["b", 2:] # exclude two smallest rows in column 'b' assert_col_equal(t2["b"], [5.1, 6.2, 7.0]) for t2 in (t.loc[:], t.iloc[:]): assert_col_equal(t2["a"], [1, 2, 3, 4, 5]) def test_table_loc_indices(self, main_col, table_types, engine): self._setup(main_col, table_types) t = self.t t.add_index("a", engine=engine) t.add_index("b", engine=engine) t2 = t.loc_indices[self.make_val(3)] # single label, with primary key 'a' assert t2 == 2 # list search t2 = t.loc_indices[[self.make_val(1), self.make_val(4), self.make_val(2)]] for i, p in zip(t2, [1, 4, 2]): # same order as input list assert i == p - 1 def test_invalid_search(self, main_col, table_types, engine): # using .loc and .loc_indices with a value not present should raise an exception self._setup(main_col, table_types) t = self.t t.add_index("a") with pytest.raises(KeyError): t.loc[self.make_val(6)] with pytest.raises(KeyError): t.loc_indices[self.make_val(6)] def test_copy_index_references(self, main_col, table_types, engine): # check against a bug in which indices were given an incorrect # column reference when copied self._setup(main_col, table_types) t = self.t t.add_index("a") t.add_index("b") t2 = t.copy() assert t2.indices["a"].columns[0] is t2["a"] assert t2.indices["b"].columns[0] is t2["b"] def test_unique_index(self, main_col, table_types, engine): self._setup(main_col, table_types) t = self.t t.add_index("a", engine=engine, unique=True) assert np.all(t.indices["a"].sorted_data() == [0, 1, 2, 3, 4]) if self.mutable: with pytest.raises(ValueError): t.add_row((5.0, "9", 5)) def test_copy_indexed_table(self, table_types): self._setup(_col, table_types) t = self.t t.add_index("a") t.add_index(["a", "b"]) for tp in (self._table_type(t), t.copy()): assert len(t.indices) == len(tp.indices) for index, indexp in zip(t.indices, tp.indices): assert np.all(index.data.data == indexp.data.data) assert index.data.data.colnames == indexp.data.data.colnames def test_updating_row_byindex(self, main_col, table_types, engine): self._setup(main_col, table_types) t = Table( [["a", "b", "c", "d"], [2, 3, 4, 5], [3, 4, 5, 6]], names=("a", "b", "c"), meta={"name": "first table"}, ) t.add_index("a", engine=engine) t.add_index("b", engine=engine) t.loc["c"] = ["g", 40, 50] # single label, with primary key 'a' t2 = t[2] assert list(t2) == ["g", 40, 50] # list search t.loc[["a", "d", "b"]] = [["a", 20, 30], ["d", 50, 60], ["b", 30, 40]] t2 = [["a", 20, 30], ["d", 50, 60], ["b", 30, 40]] for i, p in zip(t2, [1, 4, 2]): # same order 
as input list assert list(t[p - 1]) == i def test_invalid_updates(self, main_col, table_types, engine): # updating via .loc with a value of the wrong shape should raise an exception self._setup(main_col, table_types) t = Table( [[1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6]], names=("a", "b", "c"), meta={"name": "first table"}, ) t.add_index("a") with pytest.raises(ValueError): t.loc[3] = [[1, 2, 3]] with pytest.raises(ValueError): t.loc[[1, 4, 2]] = [[1, 2, 3], [4, 5, 6]] with pytest.raises(ValueError): t.loc[[1, 4, 2]] = [[1, 2, 3], [4, 5, 6], [2, 3]] with pytest.raises(ValueError): t.loc[[1, 4, 2]] = [[1, 2, 3], [4, 5], [2, 3]] def test_get_index(): a = [1, 4, 5, 2, 7, 4, 45] b = [2.0, 5.0, 8.2, 3.7, 4.3, 6.5, 3.3] t = Table([a, b], names=("a", "b"), meta={"name": "first table"}) t.add_index(["a"]) # Getting the values of index using names x1 = get_index(t, names=["a"]) assert isinstance(x1, SlicedIndex) assert len(x1.columns) == 1 assert len(x1.columns[0]) == 7 assert x1.columns[0].info.name == "a" # Getting the values of index using table_copy x2 = get_index(t, table_copy=t[["a"]]) assert isinstance(x2, SlicedIndex) assert len(x2.columns) == 1 assert len(x2.columns[0]) == 7 assert x2.columns[0].info.name == "a" with pytest.raises(ValueError): get_index(t, names=["a"], table_copy=t[["a"]]) with pytest.raises(ValueError): get_index(t, names=None, table_copy=None) def test_table_index_time_warning(engine): # Make sure that no ERFA warnings are emitted when indexing a table by # a Time column with a non-default time scale tab = Table() tab["a"] = Time([1, 2, 3], format="jyear", scale="tai") tab["b"] = [4, 3, 2] with warnings.catch_warnings(record=True) as wlist: tab.add_index(("a", "b"), engine=engine) assert len(wlist) == 0 @pytest.mark.parametrize( "col", [ Column(np.arange(50000, 50005)), np.arange(50000, 50005) * u.m, Time(np.arange(50000, 50005), format="mjd"), ], ) def test_table_index_does_not_propagate_to_column_slices(col): # They lost contact with the parent table, so they should also not have # information on the indices; this helps prevent large memory usage if, # e.g., a large time column is turned into an object array; see gh-10688. tab = QTable() tab["t"] = col tab.add_index("t") t = tab["t"] assert t.info.indices tx = t[1:] assert not tx.info.indices tabx = tab[1:] t = tabx["t"] assert t.info.indices def test_hstack_qtable_table(): # Check in particular that indices are initialized or copied correctly # for a Column that is being converted to a Quantity. qtab = QTable([np.arange(5.0) * u.m], names=["s"]) qtab.add_index("s") tab = Table([Column(np.arange(5.0), unit=u.s)], names=["t"]) qstack = hstack([qtab, tab]) assert qstack["t"].info.indices == [] assert qstack.indices == [] def test_index_slice_exception(): with pytest.raises(TypeError, match="index_slice must be tuple or slice"): SlicedIndex(None, None)
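# ---------------------------------------------------------------------------
# A minimal usage sketch (illustrative, not part of the test suite) of the
# public indexing API exercised by the tests above -- add_index(), loc,
# loc_indices and iloc; the table values here are arbitrary.
from astropy.table import Table as _Table

_t = _Table({"a": [3, 1, 2], "b": [10.0, 20.0, 30.0]})
_t.add_index("a")                         # build a sorted index on column 'a'
assert _t.loc[2]["b"] == 30.0             # label-based row lookup via the index
assert _t.loc_indices[1] == 1             # row position of the entry with a == 1
assert list(_t.iloc[0:2]["a"]) == [1, 2]  # rows with the two smallest 'a' values
# ---------------------------------------------------------------------------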
import textwrap from os.path import abspath, dirname, join import pytest from astropy import extern from astropy.coordinates import SkyCoord from astropy.table.table import Table from astropy.time import Time from astropy.utils.compat.optional_deps import HAS_BLEACH, HAS_IPYTHON from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH EXTERN_DIR = abspath(join(dirname(extern.__file__), "jquery", "data")) JQUERY_MIN_JS = "jquery-3.6.0.min.js" REFERENCE = """ <html> <head> <meta charset="utf-8"/> <meta content="text/html;charset=UTF-8" http-equiv="Content-type"/> <style> body {font-family: sans-serif;} table.dataTable {width: auto !important; margin: 0 !important;} .dataTables_filter, .dataTables_paginate {float: left !important; margin-left:1em} </style> <link href="%(datatables_css_url)s" rel="stylesheet" type="text/css"/> <script src="%(jquery_url)s"> </script> <script src="%(datatables_js_url)s"> </script> </head> <body> <script> var astropy_sort_num = function(a, b) { var a_num = parseFloat(a); var b_num = parseFloat(b); if (isNaN(a_num) && isNaN(b_num)) return ((a < b) ? -1 : ((a > b) ? 1 : 0)); else if (!isNaN(a_num) && !isNaN(b_num)) return ((a_num < b_num) ? -1 : ((a_num > b_num) ? 1 : 0)); else return isNaN(a_num) ? -1 : 1; } jQuery.extend( jQuery.fn.dataTableExt.oSort, { "optionalnum-asc": astropy_sort_num, "optionalnum-desc": function (a,b) { return -astropy_sort_num(a, b); } }); $(document).ready(function() { $('#%(table_id)s').dataTable({ order: [], pageLength: %(length)s, lengthMenu: [[%(display_length)s, -1], [%(display_length)s, 'All']], pagingType: "full_numbers", columnDefs: [{targets: [0], type: "optionalnum"}] }); } ); </script> <table class="%(table_class)s" id="%(table_id)s"> <thead> <tr> <th>a</th> <th>b</th> </tr> </thead> %(lines)s </table> </body> </html> """ TPL = " <tr>\n <td>{0}</td>\n <td>{1}</td>\n </tr>" def format_lines(col1, col2): col1_format = getattr(col1.info, "default_format", lambda x: x) col2_format = getattr(col2.info, "default_format", lambda x: x) return "\n".join( TPL.format(col1_format(v1), col2_format(v2)) for v1, v2 in zip(col1, col2) ) def test_write_jsviewer_default(tmp_path): t = Table() t["a"] = [1, 2, 3, 4, 5] t["b"] = ["a", "b", "c", "d", "e"] t["a"].unit = "m" tmpfile = tmp_path / "test.html" t.write(tmpfile, format="jsviewer") ref = REFERENCE % dict( lines=format_lines(t["a"], t["b"]), table_class="display compact", table_id=f"table{id(t)}", length="50", display_length="10, 25, 50, 100, 500, 1000", datatables_css_url=( "https://cdn.datatables.net/1.10.12/css/jquery.dataTables.css" ), datatables_js_url=( "https://cdn.datatables.net/1.10.12/js/jquery.dataTables.min.js" ), jquery_url="https://code.jquery.com/" + JQUERY_MIN_JS, ) with open(tmpfile) as f: assert f.read().strip() == ref.strip() def test_write_jsviewer_overwrite(tmp_path): t = Table() t["a"] = [1, 2, 3, 4, 5] t["b"] = ["a", "b", "c", "d", "e"] t["a"].unit = "m" tmpfile = tmp_path / "test.html" # normal write t.write(tmpfile, format="jsviewer") # errors on overwrite with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH): t.write(tmpfile, format="jsviewer") # unless specified t.write(tmpfile, format="jsviewer", overwrite=True) @pytest.mark.parametrize( "mixin", [ Time(["J2000", "J2001"]), Time([50000.0, 50001.0001], format="mjd"), SkyCoord(ra=[100.0, 110.0], dec=[-10.0, 10.0], unit="deg"), ], ) def test_write_jsviewer_mixin(tmp_path, mixin): t = Table() t["a"] = [1, 2] t["b"] = mixin t["a"].unit = "m" tmpfile = tmp_path / "test.html" t.write(tmpfile, format="jsviewer") 
ref = REFERENCE % dict( lines=format_lines(t["a"], t["b"]), table_class="display compact", table_id=f"table{id(t)}", length="50", display_length="10, 25, 50, 100, 500, 1000", datatables_css_url=( "https://cdn.datatables.net/1.10.12/css/jquery.dataTables.css" ), datatables_js_url=( "https://cdn.datatables.net/1.10.12/js/jquery.dataTables.min.js" ), jquery_url="https://code.jquery.com/" + JQUERY_MIN_JS, ) with open(tmpfile) as f: assert f.read().strip() == ref.strip() @pytest.mark.skipif(not HAS_BLEACH, reason="requires bleach") def test_write_jsviewer_options(tmp_path): t = Table() t["a"] = [1, 2, 3, 4, 5] t["b"] = ["<b>a</b>", "b", "c", "d", "e"] t["a"].unit = "m" tmpfile = tmp_path / "test.html" t.write( tmpfile, format="jsviewer", table_id="test", max_lines=3, jskwargs={"display_length": 5}, table_class="display hover", htmldict=dict(raw_html_cols="b"), ) ref = REFERENCE % dict( lines=format_lines(t["a"][:3], t["b"][:3]), table_class="display hover", table_id="test", length="5", display_length="5, 10, 25, 50, 100, 500, 1000", datatables_css_url=( "https://cdn.datatables.net/1.10.12/css/jquery.dataTables.css" ), datatables_js_url=( "https://cdn.datatables.net/1.10.12/js/jquery.dataTables.min.js" ), jquery_url="https://code.jquery.com/" + JQUERY_MIN_JS, ) with open(tmpfile) as f: assert f.read().strip() == ref.strip() def test_write_jsviewer_local(tmp_path): t = Table() t["a"] = [1, 2, 3, 4, 5] t["b"] = ["a", "b", "c", "d", "e"] t["a"].unit = "m" tmpfile = tmp_path / "test.html" t.write( tmpfile, format="jsviewer", table_id="test", jskwargs={"use_local_files": True} ) ref = REFERENCE % dict( lines=format_lines(t["a"], t["b"]), table_class="display compact", table_id="test", length="50", display_length="10, 25, 50, 100, 500, 1000", datatables_css_url="file://" + join(EXTERN_DIR, "css", "jquery.dataTables.css"), datatables_js_url="file://" + join(EXTERN_DIR, "js", "jquery.dataTables.min.js"), jquery_url="file://" + join(EXTERN_DIR, "js", JQUERY_MIN_JS), ) with open(tmpfile) as f: assert f.read().strip() == ref.strip() @pytest.mark.skipif(not HAS_IPYTHON, reason="requires IPython") def test_show_in_notebook(): t = Table() t["a"] = [1, 2, 3, 4, 5] t["b"] = ["b", "c", "a", "d", "e"] htmlstr_windx = t.show_in_notebook().data # should default to 'idx' htmlstr_windx_named = t.show_in_notebook(show_row_index="realidx").data htmlstr_woindx = t.show_in_notebook(show_row_index=False).data assert ( textwrap.dedent( """ <thead><tr><th>idx</th><th>a</th><th>b</th></tr></thead> <tr><td>0</td><td>1</td><td>b</td></tr> <tr><td>1</td><td>2</td><td>c</td></tr> <tr><td>2</td><td>3</td><td>a</td></tr> <tr><td>3</td><td>4</td><td>d</td></tr> <tr><td>4</td><td>5</td><td>e</td></tr> """ ).strip() in htmlstr_windx ) assert ( "<thead><tr><th>realidx</th><th>a</th><th>b</th></tr></thead>" in htmlstr_windx_named ) assert "<thead><tr><th>a</th><th>b</th></tr></thead>" in htmlstr_woindx
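# ---------------------------------------------------------------------------
# A short illustrative sketch of the jsviewer writer verified above; the
# output path is arbitrary. jskwargs tunes the DataTables widget, and
# jskwargs={"use_local_files": True} would reference the jquery/datatables
# copies bundled with astropy instead of the CDN.
import tempfile as _tempfile
from os.path import join as _join
from astropy.table import Table as _Table

_t = _Table({"a": [1, 2, 3], "b": ["x", "y", "z"]})
_out = _join(_tempfile.mkdtemp(), "demo.html")
_t.write(_out, format="jsviewer", table_id="demo", jskwargs={"display_length": 10})
# ---------------------------------------------------------------------------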
# Licensed under a 3-clause BSD style license - see LICENSE.rst from collections import OrderedDict, UserDict from collections.abc import Mapping import numpy as np import pytest import astropy.units as u from astropy.table import Column, MaskedColumn, Table, TableColumns class DictLike(Mapping): """A minimal mapping-like object that does not subclass dict. This is used to test code that expects dict-like but without actually inheriting from dict. """ def __init__(self, *args, **kwargs): self._data = dict(*args, **kwargs) def __getitem__(self, item): return self._data[item] def __setitem__(self, item, value): self._data[item] = value def __iter__(self): return iter(self._data) def __len__(self): return len(self._data) class TestTableColumnsInit: def test_init(self): """Test initialisation with lists, tuples, dicts of arrays rather than Columns [regression test for #2647]""" x1 = np.arange(10.0) x2 = np.arange(5.0) x3 = np.arange(7.0) col_list = [("x1", x1), ("x2", x2), ("x3", x3)] tc_list = TableColumns(col_list) for col in col_list: assert col[0] in tc_list assert tc_list[col[0]] is col[1] col_tuple = (("x1", x1), ("x2", x2), ("x3", x3)) tc_tuple = TableColumns(col_tuple) for col in col_tuple: assert col[0] in tc_tuple assert tc_tuple[col[0]] is col[1] col_dict = dict([("x1", x1), ("x2", x2), ("x3", x3)]) tc_dict = TableColumns(col_dict) for col in tc_dict.keys(): assert col in tc_dict assert tc_dict[col] is col_dict[col] columns = [Column(col[1], name=col[0]) for col in col_list] tc = TableColumns(columns) for col in columns: assert col.name in tc assert tc[col.name] is col # pytest.mark.usefixtures('table_type') class BaseInitFrom: def _setup(self, table_type): pass def test_basic_init(self, table_type): self._setup(table_type) t = table_type(self.data, names=("a", "b", "c")) assert t.colnames == ["a", "b", "c"] assert np.all(t["a"] == np.array([1, 3])) assert np.all(t["b"] == np.array([2, 4])) assert np.all(t["c"] == np.array([3, 5])) assert all(t[name].name == name for name in t.colnames) def test_set_dtype(self, table_type): self._setup(table_type) t = table_type(self.data, names=("a", "b", "c"), dtype=("i4", "f4", "f8")) assert t.colnames == ["a", "b", "c"] assert np.all(t["a"] == np.array([1, 3], dtype="i4")) assert np.all(t["b"] == np.array([2, 4], dtype="f4")) assert np.all(t["c"] == np.array([3, 5], dtype="f8")) assert t["a"].dtype.type == np.int32 assert t["b"].dtype.type == np.float32 assert t["c"].dtype.type == np.float64 assert all(t[name].name == name for name in t.colnames) def test_names_dtype_mismatch(self, table_type): self._setup(table_type) with pytest.raises(ValueError): table_type(self.data, names=("a",), dtype=("i4", "f4", "i4")) def test_names_cols_mismatch(self, table_type): self._setup(table_type) with pytest.raises(ValueError): table_type(self.data, names=("a",), dtype="i4") @pytest.mark.usefixtures("table_type") class BaseInitFromListLike(BaseInitFrom): def test_names_cols_mismatch(self, table_type): self._setup(table_type) with pytest.raises(ValueError): table_type(self.data, names=["a"], dtype=[int]) def test_names_copy_false(self, table_type): self._setup(table_type) with pytest.raises(ValueError): table_type(self.data, names=["a"], dtype=[int], copy=False) @pytest.mark.usefixtures("table_type") class BaseInitFromDictLike(BaseInitFrom): pass @pytest.mark.usefixtures("table_type") class TestInitFromNdarrayHomo(BaseInitFromListLike): def setup_method(self, method): self.data = np.array([(1, 2, 3), (3, 4, 5)], dtype="i4") def test_default_names(self, 
table_type): self._setup(table_type) t = table_type(self.data) assert t.colnames == ["col0", "col1", "col2"] def test_ndarray_ref(self, table_type): """Init with ndarray and copy=False and show that this is a reference to input ndarray""" self._setup(table_type) t = table_type(self.data, copy=False) t["col1"][1] = 0 assert t.as_array()["col1"][1] == 0 assert t["col1"][1] == 0 assert self.data[1][1] == 0 def test_partial_names_dtype(self, table_type): self._setup(table_type) t = table_type(self.data, names=["a", None, "c"], dtype=[None, None, "f8"]) assert t.colnames == ["a", "col1", "c"] assert t["a"].dtype.type == np.int32 assert t["col1"].dtype.type == np.int32 assert t["c"].dtype.type == np.float64 assert all(t[name].name == name for name in t.colnames) def test_partial_names_ref(self, table_type): self._setup(table_type) t = table_type(self.data, names=["a", None, "c"]) assert t.colnames == ["a", "col1", "c"] assert t["a"].dtype.type == np.int32 assert t["col1"].dtype.type == np.int32 assert t["c"].dtype.type == np.int32 assert all(t[name].name == name for name in t.colnames) @pytest.mark.usefixtures("table_type") class TestInitFromListOfLists(BaseInitFromListLike): def setup_method(self, table_type): self._setup(table_type) self.data = [ (np.int32(1), np.int32(3)), Column(name="col1", data=[2, 4], dtype=np.int32), np.array([3, 5], dtype=np.int32), ] def test_default_names(self, table_type): self._setup(table_type) t = table_type(self.data) assert t.colnames == ["col0", "col1", "col2"] assert all(t[name].name == name for name in t.colnames) def test_partial_names_dtype(self, table_type): self._setup(table_type) t = table_type(self.data, names=["b", None, "c"], dtype=["f4", None, "f8"]) assert t.colnames == ["b", "col1", "c"] assert t["b"].dtype.type == np.float32 assert t["col1"].dtype.type == np.int32 assert t["c"].dtype.type == np.float64 assert all(t[name].name == name for name in t.colnames) def test_bad_data(self, table_type): self._setup(table_type) with pytest.raises(ValueError): table_type([[1, 2], [3, 4, 5]]) @pytest.mark.usefixtures("table_type") class TestInitFromListOfDicts(BaseInitFromListLike): def _setup(self, table_type): self.data = [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4, "c": 5}] self.data_ragged = [{"a": 1, "b": 2}, {"a": 2, "c": 4}] def test_names(self, table_type): self._setup(table_type) t = table_type(self.data) assert all(colname in {"a", "b", "c"} for colname in t.colnames) def test_names_ordered(self, table_type): self._setup(table_type) t = table_type(self.data, names=("c", "b", "a")) assert t.colnames == ["c", "b", "a"] def test_missing_data_init_from_dict(self, table_type): self._setup(table_type) dat = self.data_ragged for rows in [False, True]: t = table_type(rows=dat) if rows else table_type(dat) assert np.all(t["a"] == [1, 2]) assert np.all(t["b"].mask == [False, True]) assert np.all(t["b"].data == [2, 2]) assert np.all(t["c"].mask == [True, False]) assert np.all(t["c"].data == [4, 4]) assert type(t["a"]) is (MaskedColumn if t.masked else Column) assert type(t["b"]) is MaskedColumn assert type(t["c"]) is MaskedColumn class TestInitFromListOfMapping(TestInitFromListOfDicts): """Test that init from a Mapping that is not a dict subclass works""" def _setup(self, table_type): self.data = [DictLike(a=1, b=2, c=3), DictLike(a=3, b=4, c=5)] self.data_ragged = [DictLike(a=1, b=2), DictLike(a=2, c=4)] # Make sure data rows are not a dict subclass assert not isinstance(self.data[0], dict) @pytest.mark.usefixtures("table_type") class 
TestInitFromColsList(BaseInitFromListLike): def _setup(self, table_type): self.data = [ Column([1, 3], name="x", dtype=np.int32), np.array([2, 4], dtype=np.int32), np.array([3, 5], dtype="i8"), ] def test_default_names(self, table_type): self._setup(table_type) t = table_type(self.data) assert t.colnames == ["x", "col1", "col2"] assert all(t[name].name == name for name in t.colnames) def test_partial_names_dtype(self, table_type): self._setup(table_type) t = table_type(self.data, names=["b", None, "c"], dtype=["f4", None, "f8"]) assert t.colnames == ["b", "col1", "c"] assert t["b"].dtype.type == np.float32 assert t["col1"].dtype.type == np.int32 assert t["c"].dtype.type == np.float64 assert all(t[name].name == name for name in t.colnames) def test_ref(self, table_type): """Test that initializing from a list of columns can be done by reference""" self._setup(table_type) t = table_type(self.data, copy=False) t["x"][0] = 100 assert self.data[0][0] == 100 @pytest.mark.usefixtures("table_type") class TestInitFromNdarrayStruct(BaseInitFromDictLike): def _setup(self, table_type): self.data = np.array( [(1, 2, 3), (3, 4, 5)], dtype=[("x", "i8"), ("y", "i4"), ("z", "i8")] ) def test_ndarray_ref(self, table_type): """Init with ndarray and copy=False and show that table uses reference to input ndarray""" self._setup(table_type) t = table_type(self.data, copy=False) t["x"][1] = 0 # Column-wise assignment t[0]["y"] = 0 # Row-wise assignment assert self.data["x"][1] == 0 assert self.data["y"][0] == 0 assert np.all(np.array(t) == self.data) assert all(t[name].name == name for name in t.colnames) def test_partial_names_dtype(self, table_type): self._setup(table_type) t = table_type(self.data, names=["e", None, "d"], dtype=["f4", None, "f8"]) assert t.colnames == ["e", "y", "d"] assert t["e"].dtype.type == np.float32 assert t["y"].dtype.type == np.int32 assert t["d"].dtype.type == np.float64 assert all(t[name].name == name for name in t.colnames) def test_partial_names_ref(self, table_type): self._setup(table_type) t = table_type(self.data, names=["e", None, "d"], copy=False) assert t.colnames == ["e", "y", "d"] assert t["e"].dtype.type == np.int64 assert t["y"].dtype.type == np.int32 assert t["d"].dtype.type == np.int64 assert all(t[name].name == name for name in t.colnames) @pytest.mark.usefixtures("table_type") class TestInitFromDict(BaseInitFromDictLike): def _setup(self, table_type): self.data = dict( [ ("a", Column([1, 3], name="x")), ("b", [2, 4]), ("c", np.array([3, 5], dtype="i8")), ] ) @pytest.mark.usefixtures("table_type") class TestInitFromMapping(BaseInitFromDictLike): def _setup(self, table_type): self.data = UserDict( [ ("a", Column([1, 3], name="x")), ("b", [2, 4]), ("c", np.array([3, 5], dtype="i8")), ] ) assert isinstance(self.data, Mapping) assert not isinstance(self.data, dict) @pytest.mark.usefixtures("table_type") class TestInitFromOrderedDict(BaseInitFromDictLike): def _setup(self, table_type): self.data = OrderedDict( [ ("a", Column(name="x", data=[1, 3])), ("b", [2, 4]), ("c", np.array([3, 5], dtype="i8")), ] ) def test_col_order(self, table_type): self._setup(table_type) t = table_type(self.data) assert t.colnames == ["a", "b", "c"] @pytest.mark.usefixtures("table_type") class TestInitFromRow(BaseInitFromDictLike): def _setup(self, table_type): arr = np.array( [(1, 2, 3), (3, 4, 5)], dtype=[("x", "i8"), ("y", "i8"), ("z", "f8")] ) self.data = table_type(arr, meta={"comments": ["comment1", "comment2"]}) def test_init_from_row(self, table_type): self._setup(table_type) t = 
table_type(self.data[0]) # Values and meta match original assert t.meta["comments"][0] == "comment1" for name in t.colnames: assert np.all(t[name] == self.data[name][0:1]) assert all(t[name].name == name for name in t.colnames) # Change value in new instance and check that original is the same t["x"][0] = 8 t.meta["comments"][1] = "new comment2" assert np.all(t["x"] == np.array([8])) assert np.all(self.data["x"] == np.array([1, 3])) assert self.data.meta["comments"][1] == "comment2" @pytest.mark.usefixtures("table_type") class TestInitFromTable(BaseInitFromDictLike): def _setup(self, table_type): arr = np.array( [(1, 2, 3), (3, 4, 5)], dtype=[("x", "i8"), ("y", "i8"), ("z", "f8")] ) self.data = table_type(arr, meta={"comments": ["comment1", "comment2"]}) def test_data_meta_copy(self, table_type): self._setup(table_type) t = table_type(self.data) assert t.meta["comments"][0] == "comment1" t["x"][1] = 8 t.meta["comments"][1] = "new comment2" assert self.data.meta["comments"][1] == "comment2" assert np.all(t["x"] == np.array([1, 8])) assert np.all(self.data["x"] == np.array([1, 3])) assert t["z"].name == "z" assert all(t[name].name == name for name in t.colnames) def test_table_ref(self, table_type): self._setup(table_type) t = table_type(self.data, copy=False) t["x"][1] = 0 assert t["x"][1] == 0 assert self.data["x"][1] == 0 assert np.all(t.as_array() == self.data.as_array()) assert all(t[name].name == name for name in t.colnames) def test_partial_names_dtype(self, table_type): self._setup(table_type) t = table_type(self.data, names=["e", None, "d"], dtype=["f4", None, "i8"]) assert t.colnames == ["e", "y", "d"] assert t["e"].dtype.type == np.float32 assert t["y"].dtype.type == np.int64 assert t["d"].dtype.type == np.int64 assert all(t[name].name == name for name in t.colnames) def test_partial_names_ref(self, table_type): self._setup(table_type) t = table_type(self.data, names=["e", None, "d"], copy=False) assert t.colnames == ["e", "y", "d"] assert t["e"].dtype.type == np.int64 assert t["y"].dtype.type == np.int64 assert t["d"].dtype.type == np.float64 assert all(t[name].name == name for name in t.colnames) def test_init_from_columns(self, table_type): self._setup(table_type) t = table_type(self.data) t2 = table_type(t.columns["z", "x", "y"]) assert t2.colnames == ["z", "x", "y"] assert t2.dtype.names == ("z", "x", "y") def test_init_from_columns_slice(self, table_type): self._setup(table_type) t = table_type(self.data) t2 = table_type(t.columns[0:2]) assert t2.colnames == ["x", "y"] assert t2.dtype.names == ("x", "y") def test_init_from_columns_mix(self, table_type): self._setup(table_type) t = table_type(self.data) t2 = table_type([t.columns[0], t.columns["z"]]) assert t2.colnames == ["x", "z"] assert t2.dtype.names == ("x", "z") @pytest.mark.usefixtures("table_type") class TestInitFromNone: # Note test_table.TestEmptyData tests initializing a completely empty # table and adding data.
def test_data_none_with_cols(self, table_type): """ Test different ways of initing an empty table """ np_t = np.empty(0, dtype=[("a", "f4", (2,)), ("b", "i4")]) for kwargs in ( {"names": ("a", "b")}, {"names": ("a", "b"), "dtype": (("f4", (2,)), "i4")}, {"dtype": [("a", "f4", (2,)), ("b", "i4")]}, {"dtype": np_t.dtype}, ): t = table_type(**kwargs) assert t.colnames == ["a", "b"] assert len(t["a"]) == 0 assert len(t["b"]) == 0 if "dtype" in kwargs: assert t["a"].dtype.type == np.float32 assert t["b"].dtype.type == np.int32 assert t["a"].shape[1:] == (2,) @pytest.mark.usefixtures("table_types") class TestInitFromRows: def test_init_with_rows(self, table_type): for rows in ([[1, "a"], [2, "b"]], [(1, "a"), (2, "b")], ((1, "a"), (2, "b"))): t = table_type(rows=rows, names=("a", "b")) assert np.all(t["a"] == [1, 2]) assert np.all(t["b"] == ["a", "b"]) assert t.colnames == ["a", "b"] assert t["a"].dtype.kind == "i" assert t["b"].dtype.kind in ("S", "U") # Regression test for # https://github.com/astropy/astropy/issues/3052 assert t["b"].dtype.str.endswith("1") rows = np.arange(6).reshape(2, 3) t = table_type(rows=rows, names=("a", "b", "c"), dtype=["f8", "f4", "i8"]) assert np.all(t["a"] == [0, 3]) assert np.all(t["b"] == [1, 4]) assert np.all(t["c"] == [2, 5]) assert t.colnames == ["a", "b", "c"] assert t["a"].dtype.str.endswith("f8") assert t["b"].dtype.str.endswith("f4") assert t["c"].dtype.str.endswith("i8") def test_init_with_rows_and_data(self, table_type): with pytest.raises(ValueError) as err: table_type(data=[[1]], rows=[[1]]) assert "Cannot supply both `data` and `rows` values" in str(err.value) @pytest.mark.parametrize("has_data", [True, False]) def test_init_table_with_names_and_structured_dtype(has_data): """Test fix for #10393""" arr = np.ones(2, dtype=np.dtype([("a", "i4"), ("b", "f4")])) data_args = [arr] if has_data else [] t = Table(*data_args, names=["x", "y"], dtype=arr.dtype) assert t.colnames == ["x", "y"] assert str(t["x"].dtype) == "int32" assert str(t["y"].dtype) == "float32" assert len(t) == (2 if has_data else 0) @pytest.mark.usefixtures("table_type") def test_init_and_ref_from_multidim_ndarray(table_type): """ Test that initializing from an ndarray structured array with a multi-dim column works for both copy=False and True and that the referencing is as expected. """ for copy in (False, True): nd = np.array( [(1, [10, 20]), (3, [30, 40])], dtype=[("a", "i8"), ("b", "i8", (2,))] ) t = table_type(nd, copy=copy) assert t.colnames == ["a", "b"] assert t["a"].shape == (2,) assert t["b"].shape == (2, 2) t["a"][0] = -200 t["b"][1][1] = -100 if copy: assert nd["a"][0] == 1 assert nd["b"][1][1] == 40 else: assert nd["a"][0] == -200 assert nd["b"][1][1] == -100 @pytest.mark.usefixtures("table_type") @pytest.mark.parametrize("copy", [False, True]) def test_init_and_ref_from_dict(table_type, copy): """ Test that initializing from a dict works for both copy=False and True and that the referencing is as expected. 
""" x1 = np.arange(10.0) x2 = np.zeros(10) col_dict = dict([("x1", x1), ("x2", x2)]) t = table_type(col_dict, copy=copy) assert set(t.colnames) == {"x1", "x2"} assert t["x1"].shape == (10,) assert t["x2"].shape == (10,) t["x1"][0] = -200 t["x2"][1] = -100 if copy: assert x1[0] == 0.0 assert x2[1] == 0.0 else: assert x1[0] == -200 assert x2[1] == -100 def test_add_none_object_column(): """Test fix for a problem introduced in #10636 (see https://github.com/astropy/astropy/pull/10636#issuecomment-676847515) """ t = Table(data={"a": [1, 2, 3]}) t["b"] = None assert all(val is None for val in t["b"]) assert t["b"].dtype.kind == "O" @pytest.mark.usefixtures("table_type") def test_init_from_row_OrderedDict(table_type): row1 = OrderedDict([("b", 1), ("a", 0)]) row2 = {"a": 10, "b": 20} rows12 = [row1, row2] row3 = dict([("b", 1), ("a", 0)]) row4 = dict([("b", 11), ("a", 10)]) rows34 = [row3, row4] t1 = table_type(rows=rows12) t2 = table_type(rows=rows34) t3 = t2[sorted(t2.colnames)] assert t1.colnames == ["b", "a"] assert t2.colnames == ["b", "a"] assert t3.colnames == ["a", "b"] def test_init_from_rows_as_generator(): rows = ((1 + ii, 2 + ii) for ii in range(2)) t = Table(rows=rows) assert np.all(t["col0"] == [1, 2]) assert np.all(t["col1"] == [2, 3]) @pytest.mark.parametrize("dtype", ["fail", "i4"]) def test_init_bad_dtype_in_empty_table(dtype): with pytest.raises( ValueError, match="type was specified but could not be parsed for column names" ): Table(dtype=dtype) def test_init_data_type_not_allowed_to_init_table(): with pytest.raises( ValueError, match="Data type <class 'str'> not allowed to init Table" ): Table("hello") def test_init_Table_from_list_of_quantity(): """Test fix for #11327""" # Variation on original example in #11327 at the Table level data = [{"x": 5 * u.m, "y": 1 * u.m}, {"x": 10 * u.m, "y": 3}] t = Table(data) assert t["x"].unit is u.m assert t["y"].unit is None assert t["x"].dtype.kind == "f" assert t["y"].dtype.kind == "O" assert np.all(t["x"] == [5, 10]) assert t["y"][0] == 1 * u.m assert t["y"][1] == 3
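# ---------------------------------------------------------------------------
# A compact recap (illustrative only) of the construction paths covered by
# the tests above: dict of columns, rows=..., and zero-copy init from a
# structured ndarray; names and values are arbitrary.
import numpy as _np
from astropy.table import Table as _Table

_t1 = _Table({"x": [1, 2], "y": [3.0, 4.0]})               # dict of columns
_t2 = _Table(rows=[(1, "a"), (2, "b")], names=("i", "s"))  # list of rows
_arr = _np.array([(1, 2.0)], dtype=[("i", "i8"), ("f", "f8")])
_t3 = _Table(_arr, copy=False)      # references the ndarray instead of copying
_t3["i"][0] = 99
assert _arr["i"][0] == 99           # the change is visible in the source array
# ---------------------------------------------------------------------------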
import os import re import numpy as np import pytest from astropy.table.scripts import showtable ROOT = os.path.abspath(os.path.dirname(__file__)) ASCII_ROOT = os.path.join(ROOT, "..", "..", "io", "ascii", "tests") FITS_ROOT = os.path.join(ROOT, "..", "..", "io", "fits", "tests") VOTABLE_ROOT = os.path.join(ROOT, "..", "..", "io", "votable", "tests") def test_missing_file(capsys): showtable.main(["foobar.fits"]) out, err = capsys.readouterr() assert err.startswith("ERROR: [Errno 2] No such file or directory: 'foobar.fits'") def test_info(capsys): showtable.main([os.path.join(FITS_ROOT, "data/table.fits"), "--info"]) out, err = capsys.readouterr() assert out.splitlines() == [ "<Table length=3>", " name dtype ", "------ -------", "target bytes20", " V_mag float32", ] def test_stats(capsys): showtable.main([os.path.join(FITS_ROOT, "data/table.fits"), "--stats"]) out, err = capsys.readouterr() expected = [ "<Table length=3>", " name mean std min max ", "------ ------- ------- ---- ----", "target -- -- -- --", " V_mag 12.866[0-9]? 1.72111 11.1 15.2", ] out = out.splitlines() assert out[:4] == expected[:4] # Here we use re.match as in some cases one of the values above is # platform-dependent. assert re.match(expected[4], out[4]) is not None def test_fits(capsys): showtable.main([os.path.join(FITS_ROOT, "data/table.fits")]) out, err = capsys.readouterr() assert out.splitlines() == [ " target V_mag", "------- -----", "NGC1001 11.1", "NGC1002 12.3", "NGC1003 15.2", ] def test_fits_hdu(capsys): from astropy.units import UnitsWarning with pytest.warns(UnitsWarning): showtable.main( [ os.path.join(FITS_ROOT, "data/zerowidth.fits"), "--hdu", "AIPS OF", ] ) out, err = capsys.readouterr() assert out.startswith( " TIME SOURCE ID ANTENNA NO. SUBARRAY FREQ ID ANT FLAG STATUS 1\n" " DAYS \n" "---------- --------- ----------- -------- ------- -------- --------\n" "0.14438657 1 10 1 1 4 4\n" ) def test_csv(capsys): showtable.main([os.path.join(ASCII_ROOT, "data/simple_csv.csv")]) out, err = capsys.readouterr() assert out.splitlines() == [ " a b c ", "--- --- ---", " 1 2 3", " 4 5 6", ] def test_ascii_format(capsys): showtable.main( [ os.path.join(ASCII_ROOT, "data/commented_header.dat"), "--format", "ascii.commented_header", ] ) out, err = capsys.readouterr() assert out.splitlines() == [ " a b c ", "--- --- ---", " 1 2 3", " 4 5 6", ] def test_ascii_delimiter(capsys): showtable.main( [ os.path.join(ASCII_ROOT, "data/simple2.txt"), "--format", "ascii", "--delimiter", "|", ] ) out, err = capsys.readouterr() assert out.splitlines() == [ "obsid redshift X Y object rad ", "----- -------- ---- ---- ----------- ----", " 3102 0.32 4167 4085 Q1250+568-A 9.0", " 3102 0.32 4706 3916 Q1250+568-B 14.0", " 877 0.22 4378 3892 'Source 82' 12.5", ] def test_votable(capsys): with np.errstate(over="ignore"): # https://github.com/astropy/astropy/issues/13341 showtable.main( [ os.path.join(VOTABLE_ROOT, "data/regression.xml"), "--table-id", "main_table", "--max-width", "50", ] ) out, err = capsys.readouterr() assert out.splitlines() == [ " string_test string_test_2 ... bitarray2 ", "----------------- ------------- ... -------------", " String & test Fixed stri ... True .. False", "String &amp; test 0123456789 ... -- .. --", " XXXX XXXX ... -- .. --", " ... -- .. --", " ... -- .. --", ] def test_max_lines(capsys): showtable.main( [ os.path.join(ASCII_ROOT, "data/cds2.dat"), "--format", "ascii.cds", "--max-lines", "7", "--max-width", "30", ] ) out, err = capsys.readouterr() assert out.splitlines() == [ " SST ... Note", " ... 
", "--------------- ... ----", "041314.1+281910 ... --", " ... ... ...", "044427.1+251216 ... --", "044642.6+245903 ... --", "Length = 215 rows", ] def test_show_dtype(capsys): showtable.main([os.path.join(FITS_ROOT, "data/table.fits"), "--show-dtype"]) out, err = capsys.readouterr() assert out.splitlines() == [ " target V_mag ", "bytes20 float32", "------- -------", "NGC1001 11.1", "NGC1002 12.3", "NGC1003 15.2", ] def test_hide_unit(capsys): showtable.main([os.path.join(ASCII_ROOT, "data/cds.dat"), "--format", "ascii.cds"]) out, err = capsys.readouterr() assert out.splitlines() == [ "Index RAh RAm RAs DE- DEd DEm DEs Match Class AK Fit ", " h min s deg arcmin arcsec mag GMsun", "----- --- --- ----- --- --- ------ ------ ----- ----- --- -----", " 1 3 28 39.09 + 31 6 1.9 -- I* -- 1.35", ] showtable.main( [ os.path.join(ASCII_ROOT, "data/cds.dat"), "--format", "ascii.cds", "--hide-unit", ] ) out, err = capsys.readouterr() assert out.splitlines() == [ "Index RAh RAm RAs DE- DEd DEm DEs Match Class AK Fit ", "----- --- --- ----- --- --- --- --- ----- ----- --- ----", " 1 3 28 39.09 + 31 6 1.9 -- I* -- 1.35", ]
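# ---------------------------------------------------------------------------
# The tests above drive the showtable command-line script through its main()
# entry point; an equivalent shell call would be `showtable data.fits --info`
# (the file name here is hypothetical). Programmatically:
from astropy.table.scripts import showtable as _showtable

# Prints the column name/dtype summary for the (hypothetical) file, or an
# ERROR line if it does not exist -- main() reports rather than raising.
_showtable.main(["data.fits", "--info"])
# ---------------------------------------------------------------------------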
# Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np import pytest from astropy import coordinates, time from astropy import units as u from astropy.table import Column, NdarrayMixin, QTable, Table, table_helpers, unique from astropy.utils.compat import NUMPY_LT_1_22, NUMPY_LT_1_22_1 from astropy.utils.exceptions import AstropyUserWarning def sort_eq(list1, list2): return sorted(list1) == sorted(list2) def test_column_group_by(T1): for masked in (False, True): t1 = QTable(T1, masked=masked) t1a = t1["a"].copy() # Group by a Column (i.e. numpy array) t1ag = t1a.group_by(t1["a"]) assert np.all(t1ag.groups.indices == np.array([0, 1, 4, 8])) # Group by a Table t1ag = t1a.group_by(t1["a", "b"]) assert np.all(t1ag.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8])) # Group by a numpy structured array t1ag = t1a.group_by(t1["a", "b"].as_array()) assert np.all(t1ag.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8])) def test_table_group_by(T1): """ Test basic table group_by functionality for possible key types and for masked/unmasked tables. """ for masked in (False, True): t1 = QTable(T1, masked=masked) # Group by a single column key specified by name tg = t1.group_by("a") assert np.all(tg.groups.indices == np.array([0, 1, 4, 8])) assert str(tg.groups) == "<TableGroups indices=[0 1 4 8]>" assert str(tg["a"].groups) == "<ColumnGroups indices=[0 1 4 8]>" # Sorted by 'a' and in original order for rest assert tg.pformat() == [ " a b c d q ", " m ", "--- --- --- --- ---", " 0 a 0.0 4 4.0", " 1 b 3.0 5 5.0", " 1 a 2.0 6 6.0", " 1 a 1.0 7 7.0", " 2 c 7.0 0 0.0", " 2 b 5.0 1 1.0", " 2 b 6.0 2 2.0", " 2 a 4.0 3 3.0", ] assert tg.meta["ta"] == 1 assert tg["c"].meta["a"] == 1 assert tg["c"].description == "column c" # Group by a table column tg2 = t1.group_by(t1["a"]) assert tg.pformat() == tg2.pformat() # Group by two columns spec'd by name for keys in (["a", "b"], ("a", "b")): tg = t1.group_by(keys) assert np.all(tg.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8])) # Sorted by 'a', 'b' and in original order for rest assert tg.pformat() == [ " a b c d q ", " m ", "--- --- --- --- ---", " 0 a 0.0 4 4.0", " 1 a 2.0 6 6.0", " 1 a 1.0 7 7.0", " 1 b 3.0 5 5.0", " 2 a 4.0 3 3.0", " 2 b 5.0 1 1.0", " 2 b 6.0 2 2.0", " 2 c 7.0 0 0.0", ] # Group by a Table tg2 = t1.group_by(t1["a", "b"]) assert tg.pformat() == tg2.pformat() # Group by a structured array tg2 = t1.group_by(t1["a", "b"].as_array()) assert tg.pformat() == tg2.pformat() # Group by a simple ndarray tg = t1.group_by(np.array([0, 1, 0, 1, 2, 1, 0, 0])) assert np.all(tg.groups.indices == np.array([0, 4, 7, 8])) assert tg.pformat() == [ " a b c d q ", " m ", "--- --- --- --- ---", " 2 c 7.0 0 0.0", " 2 b 6.0 2 2.0", " 1 a 2.0 6 6.0", " 1 a 1.0 7 7.0", " 2 b 5.0 1 1.0", " 2 a 4.0 3 3.0", " 1 b 3.0 5 5.0", " 0 a 0.0 4 4.0", ] def test_groups_keys(T1): tg = T1.group_by("a") keys = tg.groups.keys assert keys.dtype.names == ("a",) assert np.all(keys["a"] == np.array([0, 1, 2])) tg = T1.group_by(["a", "b"]) keys = tg.groups.keys assert keys.dtype.names == ("a", "b") assert np.all(keys["a"] == np.array([0, 1, 1, 2, 2, 2])) assert np.all(keys["b"] == np.array(["a", "a", "b", "a", "b", "c"])) # Grouping by Column ignores column name tg = T1.group_by(T1["b"]) keys = tg.groups.keys assert keys.dtype.names is None def test_groups_iterator(T1): tg = T1.group_by("a") for ii, group in enumerate(tg.groups): assert group.pformat() == tg.groups[ii].pformat() assert group["a"][0] == tg["a"][tg.groups.indices[ii]] def test_grouped_copy(T1): """ Test 
that copying a table or column copies the groups properly """ for masked in (False, True): t1 = QTable(T1, masked=masked) tg = t1.group_by("a") tgc = tg.copy() assert np.all(tgc.groups.indices == tg.groups.indices) assert np.all(tgc.groups.keys == tg.groups.keys) tac = tg["a"].copy() assert np.all(tac.groups.indices == tg["a"].groups.indices) c1 = t1["a"].copy() gc1 = c1.group_by(t1["a"]) gc1c = gc1.copy() assert np.all(gc1c.groups.indices == np.array([0, 1, 4, 8])) def test_grouped_slicing(T1): """ Test that slicing a table removes previous grouping """ for masked in (False, True): t1 = QTable(T1, masked=masked) # Regular slice of a table tg = t1.group_by("a") tg2 = tg[3:5] assert np.all(tg2.groups.indices == np.array([0, len(tg2)])) assert tg2.groups.keys is None def test_group_column_from_table(T1): """ Group a column that is part of a table """ cg = T1["c"].group_by(np.array(T1["a"])) assert np.all(cg.groups.keys == np.array([0, 1, 2])) assert np.all(cg.groups.indices == np.array([0, 1, 4, 8])) def test_table_groups_mask_index(T1): """ Use boolean mask as item in __getitem__ for groups """ for masked in (False, True): t1 = Table(T1, masked=masked).group_by("a") t2 = t1.groups[np.array([True, False, True])] assert len(t2.groups) == 2 assert t2.groups[0].pformat() == t1.groups[0].pformat() assert t2.groups[1].pformat() == t1.groups[2].pformat() assert np.all(t2.groups.keys["a"] == np.array([0, 2])) def test_table_groups_array_index(T1): """ Use numpy array as item in __getitem__ for groups """ for masked in (False, True): t1 = Table(T1, masked=masked).group_by("a") t2 = t1.groups[np.array([0, 2])] assert len(t2.groups) == 2 assert t2.groups[0].pformat() == t1.groups[0].pformat() assert t2.groups[1].pformat() == t1.groups[2].pformat() assert np.all(t2.groups.keys["a"] == np.array([0, 2])) def test_table_groups_slicing(T1): """ Test that slicing table groups works """ for masked in (False, True): t1 = Table(T1, masked=masked).group_by("a") # slice(0, 2) t2 = t1.groups[0:2] assert len(t2.groups) == 2 assert t2.groups[0].pformat() == t1.groups[0].pformat() assert t2.groups[1].pformat() == t1.groups[1].pformat() assert np.all(t2.groups.keys["a"] == np.array([0, 1])) # slice(1, 2) t2 = t1.groups[1:2] assert len(t2.groups) == 1 assert t2.groups[0].pformat() == t1.groups[1].pformat() assert np.all(t2.groups.keys["a"] == np.array([1])) # slice(0, 3, 2) t2 = t1.groups[0:3:2] assert len(t2.groups) == 2 assert t2.groups[0].pformat() == t1.groups[0].pformat() assert t2.groups[1].pformat() == t1.groups[2].pformat() assert np.all(t2.groups.keys["a"] == np.array([0, 2])) def test_grouped_item_access(T1): """ Test that column slicing preserves grouping """ for masked in (False, True): t1 = Table(T1, masked=masked) # Regular slice of a table tg = t1.group_by("a") tgs = tg["a", "c", "d"] assert np.all(tgs.groups.keys == tg.groups.keys) assert np.all(tgs.groups.indices == tg.groups.indices) tgsa = tgs.groups.aggregate(np.sum) assert tgsa.pformat() == [ " a c d ", "--- ---- ---", " 0 0.0 4", " 1 6.0 18", " 2 22.0 6", ] tgs = tg["c", "d"] assert np.all(tgs.groups.keys == tg.groups.keys) assert np.all(tgs.groups.indices == tg.groups.indices) tgsa = tgs.groups.aggregate(np.sum) assert tgsa.pformat() == [ " c d ", "---- ---", " 0.0 4", " 6.0 18", "22.0 6", ] def test_mutable_operations(T1): """ Operations like adding or deleting a row should remove grouping, but adding or removing or renaming a column should retain grouping.
""" for masked in (False, True): t1 = QTable(T1, masked=masked) # add row tg = t1.group_by("a") tg.add_row((0, "a", 3.0, 4, 4 * u.m)) assert np.all(tg.groups.indices == np.array([0, len(tg)])) assert tg.groups.keys is None # remove row tg = t1.group_by("a") tg.remove_row(4) assert np.all(tg.groups.indices == np.array([0, len(tg)])) assert tg.groups.keys is None # add column tg = t1.group_by("a") indices = tg.groups.indices.copy() tg.add_column(Column(name="e", data=np.arange(len(tg)))) assert np.all(tg.groups.indices == indices) assert np.all(tg["e"].groups.indices == indices) assert np.all(tg["e"].groups.keys == tg.groups.keys) # remove column (not key column) tg = t1.group_by("a") tg.remove_column("b") assert np.all(tg.groups.indices == indices) # Still has original key col names assert tg.groups.keys.dtype.names == ("a",) assert np.all(tg["a"].groups.indices == indices) # remove key column tg = t1.group_by("a") tg.remove_column("a") assert np.all(tg.groups.indices == indices) assert tg.groups.keys.dtype.names == ("a",) assert np.all(tg["b"].groups.indices == indices) # rename key column tg = t1.group_by("a") tg.rename_column("a", "aa") assert np.all(tg.groups.indices == indices) assert tg.groups.keys.dtype.names == ("a",) assert np.all(tg["aa"].groups.indices == indices) def test_group_by_masked(T1): t1m = QTable(T1, masked=True) t1m["c"].mask[4] = True t1m["d"].mask[5] = True assert t1m.group_by("a").pformat() == [ " a b c d q ", " m ", "--- --- --- --- ---", " 0 a -- 4 4.0", " 1 b 3.0 -- 5.0", " 1 a 2.0 6 6.0", " 1 a 1.0 7 7.0", " 2 c 7.0 0 0.0", " 2 b 5.0 1 1.0", " 2 b 6.0 2 2.0", " 2 a 4.0 3 3.0", ] def test_group_by_errors(T1): """ Appropriate errors get raised. """ # Bad column name as string with pytest.raises(ValueError): T1.group_by("f") # Bad column names in list with pytest.raises(ValueError): T1.group_by(["f", "g"]) # Wrong length array with pytest.raises(ValueError): T1.group_by(np.array([1, 2])) # Wrong type with pytest.raises(TypeError): T1.group_by(None) # Masked key column t1 = QTable(T1, masked=True) t1["a"].mask[4] = True with pytest.raises(ValueError): t1.group_by("a") def test_groups_keys_meta(T1): """ Make sure the keys meta['grouped_by_table_cols'] is working. 
""" # Group by column in this table tg = T1.group_by("a") assert tg.groups.keys.meta["grouped_by_table_cols"] is True assert tg["c"].groups.keys.meta["grouped_by_table_cols"] is True assert tg.groups[1].groups.keys.meta["grouped_by_table_cols"] is True assert ( tg["d"] .groups[np.array([False, True, True])] .groups.keys.meta["grouped_by_table_cols"] is True ) # Group by external Table tg = T1.group_by(T1["a", "b"]) assert tg.groups.keys.meta["grouped_by_table_cols"] is False assert tg["c"].groups.keys.meta["grouped_by_table_cols"] is False assert tg.groups[1].groups.keys.meta["grouped_by_table_cols"] is False # Group by external numpy array tg = T1.group_by(T1["a", "b"].as_array()) assert not hasattr(tg.groups.keys, "meta") assert not hasattr(tg["c"].groups.keys, "meta") # Group by Column tg = T1.group_by(T1["a"]) assert "grouped_by_table_cols" not in tg.groups.keys.meta assert "grouped_by_table_cols" not in tg["c"].groups.keys.meta def test_table_aggregate(T1): """ Aggregate a table """ # Table with only summable cols t1 = T1["a", "c", "d"] tg = t1.group_by("a") tga = tg.groups.aggregate(np.sum) assert tga.pformat() == [ " a c d ", "--- ---- ---", " 0 0.0 4", " 1 6.0 18", " 2 22.0 6", ] # Reverts to default groups assert np.all(tga.groups.indices == np.array([0, 3])) assert tga.groups.keys is None # metadata survives assert tga.meta["ta"] == 1 assert tga["c"].meta["a"] == 1 assert tga["c"].description == "column c" # Aggregate with np.sum with masked elements. This results # in one group with no elements, hence a nan result and conversion # to float for the 'd' column. t1m = QTable(T1, masked=True) t1m["c"].mask[4:6] = True t1m["d"].mask[4:6] = True tg = t1m.group_by("a") with pytest.warns(UserWarning, match="converting a masked element to nan"): tga = tg.groups.aggregate(np.sum) assert tga.pformat() == [ " a c d q ", " m ", "--- ---- ---- ----", " 0 nan nan 4.0", " 1 3.0 13.0 18.0", " 2 22.0 6.0 6.0", ] # Aggregrate with np.sum with masked elements, but where every # group has at least one remaining (unmasked) element. Then # the int column stays as an int. t1m = QTable(t1, masked=True) t1m["c"].mask[5] = True t1m["d"].mask[5] = True tg = t1m.group_by("a") tga = tg.groups.aggregate(np.sum) assert tga.pformat() == [ " a c d ", "--- ---- ---", " 0 0.0 4", " 1 3.0 13", " 2 22.0 6", ] # Aggregate with a column type that cannot by supplied to the aggregating # function. This raises a warning but still works. 
tg = T1.group_by("a") with pytest.warns(AstropyUserWarning, match="Cannot aggregate column"): tga = tg.groups.aggregate(np.sum) assert tga.pformat() == [ " a c d q ", " m ", "--- ---- --- ----", " 0 0.0 4 4.0", " 1 6.0 18 18.0", " 2 22.0 6 6.0", ] def test_table_aggregate_reduceat(T1): """ Aggregate table with functions which have a reduceat method """ # Comparison functions without reduceat def np_mean(x): return np.mean(x) def np_sum(x): return np.sum(x) def np_add(x): return np.add(x) # Table with only summable cols t1 = T1["a", "c", "d"] tg = t1.group_by("a") # Comparison tga_r = tg.groups.aggregate(np.sum) tga_a = tg.groups.aggregate(np.add) tga_n = tg.groups.aggregate(np_sum) assert np.all(tga_r == tga_n) assert np.all(tga_a == tga_n) assert tga_n.pformat() == [ " a c d ", "--- ---- ---", " 0 0.0 4", " 1 6.0 18", " 2 22.0 6", ] tga_r = tg.groups.aggregate(np.mean) tga_n = tg.groups.aggregate(np_mean) assert np.all(tga_r == tga_n) assert tga_n.pformat() == [ " a c d ", "--- --- ---", " 0 0.0 4.0", " 1 2.0 6.0", " 2 5.5 1.5", ] # Binary ufunc np_add should raise warning without reduceat t2 = T1["a", "c"] tg = t2.group_by("a") with pytest.warns(AstropyUserWarning, match="Cannot aggregate column"): tga = tg.groups.aggregate(np_add) assert tga.pformat() == [" a ", "---", " 0", " 1", " 2"] def test_column_aggregate(T1): """ Aggregate a single table column """ for masked in (False, True): tg = QTable(T1, masked=masked).group_by("a") tga = tg["c"].groups.aggregate(np.sum) assert tga.pformat() == [" c ", "----", " 0.0", " 6.0", "22.0"] @pytest.mark.skipif( not NUMPY_LT_1_22 and NUMPY_LT_1_22_1, reason="https://github.com/numpy/numpy/issues/20699", ) def test_column_aggregate_f8(): """https://github.com/astropy/astropy/issues/12706""" # Just want to make sure it does not crash again. 
for masked in (False, True): tg = Table({"a": np.arange(2, dtype=">f8")}, masked=masked).group_by("a") tga = tg["a"].groups.aggregate(np.sum) assert tga.pformat() == [" a ", "---", "0.0", "1.0"] def test_table_filter(): """ Table groups filtering """ def all_positive(table, key_colnames): colnames = [name for name in table.colnames if name not in key_colnames] for colname in colnames: if np.any(table[colname] < 0): return False return True # Negative value in 'a' column should not filter because it is a key col t = Table.read( [ " a c d", " -2 7.0 0", " -2 5.0 1", " 0 0.0 4", " 1 3.0 5", " 1 2.0 -6", " 1 1.0 7", " 3 3.0 5", " 3 -2.0 6", " 3 1.0 7", ], format="ascii", ) tg = t.group_by("a") t2 = tg.groups.filter(all_positive) assert t2.groups[0].pformat() == [ " a c d ", "--- --- ---", " -2 7.0 0", " -2 5.0 1", ] assert t2.groups[1].pformat() == [" a c d ", "--- --- ---", " 0 0.0 4"] def test_column_filter(): """ Table groups filtering """ def all_positive(column): if np.any(column < 0): return False return True # Negative value in 'a' column should not filter because it is a key col t = Table.read( [ " a c d", " -2 7.0 0", " -2 5.0 1", " 0 0.0 4", " 1 3.0 5", " 1 2.0 -6", " 1 1.0 7", " 3 3.0 5", " 3 -2.0 6", " 3 1.0 7", ], format="ascii", ) tg = t.group_by("a") c2 = tg["c"].groups.filter(all_positive) assert len(c2.groups) == 3 assert c2.groups[0].pformat() == [" c ", "---", "7.0", "5.0"] assert c2.groups[1].pformat() == [" c ", "---", "0.0"] assert c2.groups[2].pformat() == [" c ", "---", "3.0", "2.0", "1.0"] def test_group_mixins(): """ Test grouping a table with mixin columns """ # Setup mixins idx = np.arange(4) x = np.array([3.0, 1.0, 2.0, 1.0]) q = x * u.m lon = coordinates.Longitude(x * u.deg) lat = coordinates.Latitude(x * u.deg) # For Time do J2000.0 + few * 0.1 ns (this requires > 64 bit precision) tm = time.Time(2000, format="jyear") + time.TimeDelta(x * 1e-10, format="sec") sc = coordinates.SkyCoord(ra=lon, dec=lat) aw = table_helpers.ArrayWrapper(x) nd = np.array([(3, "c"), (1, "a"), (2, "b"), (1, "a")], dtype="<i4,|S1").view( NdarrayMixin ) qt = QTable( [idx, x, q, lon, lat, tm, sc, aw, nd], names=["idx", "x", "q", "lon", "lat", "tm", "sc", "aw", "nd"], ) # Test group_by with each supported mixin type mixin_keys = ["x", "q", "lon", "lat", "tm", "sc", "aw", "nd"] for key in mixin_keys: qtg = qt.group_by(key) # Test that it got the sort order correct assert np.all(qtg["idx"] == [1, 3, 2, 0]) # Test that the groups are right # Note: skip testing SkyCoord column because that doesn't have equality for name in ["x", "q", "lon", "lat", "tm", "aw", "nd"]: assert np.all(qt[name][[1, 3]] == qtg.groups[0][name]) assert np.all(qt[name][[2]] == qtg.groups[1][name]) assert np.all(qt[name][[0]] == qtg.groups[2][name]) # Test that unique also works with mixins since most of the work is # done with group_by(). This is using *every* mixin as key. 
uqt = unique(qt, keys=mixin_keys) assert len(uqt) == 3 assert np.all(uqt["idx"] == [1, 2, 0]) assert np.all(uqt["x"] == [1.0, 2.0, 3.0]) # Column group_by() with mixins idxg = qt["idx"].group_by(qt[mixin_keys]) assert np.all(idxg == [1, 3, 2, 0]) @pytest.mark.parametrize( "col", [ time.TimeDelta([1, 2], format="sec"), time.Time([1, 2], format="cxcsec"), coordinates.SkyCoord([1, 2], [3, 4], unit="deg,deg"), ], ) def test_group_mixins_unsupported(col): """Test that aggregating unsupported mixins produces a warning only""" t = Table([[1, 1], [3, 4], col], names=["a", "b", "mix"]) tg = t.group_by("a") with pytest.warns(AstropyUserWarning, match="Cannot aggregate column 'mix'"): tg.groups.aggregate(np.sum)
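

# Editor's sketch (not part of the original test suite): a minimal end-to-end
# example of the group/aggregate API exercised above. It relies on the
# module-level Table and np imports already used by these tests; the leading
# underscore keeps pytest from collecting it, and all values are illustrative.
def _example_group_aggregate():
    t = Table({"key": [1, 1, 2], "val": [10.0, 20.0, 30.0]})
    tg = t.group_by("key")  # sorts by key and records the group boundaries
    # The tests above verify this matches a plain per-group np.mean reduction
    agg = tg.groups.aggregate(np.mean)
    assert agg["key"].tolist() == [1, 2]
    assert agg["val"].tolist() == [15.0, 30.0]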
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Test behavior related to masked tables""" import numpy as np import numpy.ma as ma import pytest import astropy.units as u from astropy.table import Column, MaskedColumn, QTable, Table from astropy.table.column import BaseColumn from astropy.time import Time from astropy.utils.masked import Masked class SetupData: def setup_method(self, method): self.a = MaskedColumn(name="a", data=[1, 2, 3], fill_value=1) self.b = MaskedColumn(name="b", data=[4, 5, 6], mask=True) self.c = MaskedColumn(name="c", data=[7, 8, 9], mask=False) self.d_mask = np.array([False, True, False]) self.d = MaskedColumn(name="d", data=[7, 8, 7], mask=self.d_mask) self.t = Table([self.a, self.b], masked=True) self.ca = Column(name="ca", data=[1, 2, 3]) self.sc = MaskedColumn( name="sc", data=[(1, 1.0), (2, 2.0), (3, 3.0)], dtype="i8,f8", fill_value=(0, -1.0), ) class TestPprint(SetupData): def test_pformat(self): assert self.t.pformat() == [ " a b ", "--- ---", " 1 --", " 2 --", " 3 --", ] class TestFilled: """Test the filled method in MaskedColumn and Table""" def setup_method(self, method): mask = [True, False, False] self.meta = {"a": 1, "b": [2, 3]} self.a = MaskedColumn( name="a", data=[1, 2, 3], fill_value=10, mask=mask, meta={"a": 1} ) self.b = MaskedColumn( name="b", data=[4.0, 5.0, 6.0], fill_value=10.0, mask=mask ) self.c = MaskedColumn(name="c", data=["7", "8", "9"], fill_value="1", mask=mask) def test_filled_column(self): f = self.a.filled() assert np.all(f == [10, 2, 3]) assert isinstance(f, Column) assert not isinstance(f, MaskedColumn) # Confirm copy, not ref assert f.meta["a"] == 1 f.meta["a"] = 2 f[1] = 100 assert self.a[1] == 2 assert self.a.meta["a"] == 1 # Fill with arg fill_value not column fill_value f = self.a.filled(20) assert np.all(f == [20, 2, 3]) f = self.b.filled() assert np.all(f == [10.0, 5.0, 6.0]) assert isinstance(f, Column) f = self.c.filled() assert np.all(f == ["1", "8", "9"]) assert isinstance(f, Column) def test_filled_masked_table(self, tableclass): t = tableclass([self.a, self.b, self.c], meta=self.meta) f = t.filled() assert isinstance(f, Table) assert f.masked is False assert np.all(f["a"] == [10, 2, 3]) assert np.allclose(f["b"], [10.0, 5.0, 6.0]) assert np.all(f["c"] == ["1", "8", "9"]) # Confirm copy, not ref assert f.meta["b"] == [2, 3] f.meta["b"][0] = 20 assert t.meta["b"] == [2, 3] f["a"][2] = 100 assert t["a"][2] == 3 def test_filled_unmasked_table(self, tableclass): t = tableclass([(1, 2), ("3", "4")], names=("a", "b"), meta=self.meta) f = t.filled() assert isinstance(f, Table) assert f.masked is False assert np.all(f["a"] == t["a"]) assert np.all(f["b"] == t["b"]) # Confirm copy, not ref assert f.meta["b"] == [2, 3] f.meta["b"][0] = 20 assert t.meta["b"] == [2, 3] f["a"][1] = 100 assert t["a"][1] == 2 class TestFillValue(SetupData): """Test setting and getting fill value in MaskedColumn and Table""" def test_init_set_fill_value(self): """Check that setting fill_value in the MaskedColumn init works""" assert self.a.fill_value == 1 c = MaskedColumn(name="c", data=["xxxx", "yyyy"], fill_value="none") assert c.fill_value == "none" def test_set_get_fill_value_for_bare_column(self): """Check set and get of fill value works for bare Column""" self.d.fill_value = -999 assert self.d.fill_value == -999 assert np.all(self.d.filled() == [7, -999, 7]) def test_set_get_fill_value_for_str_column(self): c = MaskedColumn(name="c", data=["xxxx", "yyyy"], mask=[True, False]) # assert np.all(c.filled() == ['N/A', 'yyyy']) 
c.fill_value = "ABCDEF" assert c.fill_value == "ABCD" # string truncated to dtype length assert np.all(c.filled() == ["ABCD", "yyyy"]) assert np.all(c.filled("XY") == ["XY", "yyyy"]) def test_set_get_fill_value_for_structured_column(self): assert self.sc.fill_value == np.array((0, -1.0), self.sc.dtype) sc = self.sc.copy() assert sc.fill_value.item() == (0, -1.0) sc.fill_value = (-1, np.inf) assert sc.fill_value == np.array((-1, np.inf), self.sc.dtype) sc2 = MaskedColumn(sc, fill_value=(-2, -np.inf)) assert sc2.fill_value == np.array((-2, -np.inf), sc2.dtype) def test_table_column_mask_not_ref(self): """Table column mask is not ref of original column mask""" self.b.fill_value = -999 assert self.t["b"].fill_value != -999 def test_set_get_fill_value_for_table_column(self): """Check set and get of fill value works for Column in a Table""" self.t["b"].fill_value = 1 assert self.t["b"].fill_value == 1 assert np.all(self.t["b"].filled() == [1, 1, 1]) def test_data_attribute_fill_and_mask(self): """Check that .data attribute preserves fill_value and mask""" self.t["b"].fill_value = 1 self.t["b"].mask = [True, False, True] assert self.t["b"].data.fill_value == 1 assert np.all(self.t["b"].data.mask == [True, False, True]) class TestMaskedColumnInit(SetupData): """Initialization of a masked column""" def test_set_mask_and_not_ref(self): """Check that mask gets set properly and that it is a copy, not ref""" assert np.all(~self.a.mask) assert np.all(self.b.mask) assert np.all(~self.c.mask) assert np.all(self.d.mask == self.d_mask) self.d.mask[0] = True assert not np.all(self.d.mask == self.d_mask) def test_set_mask_from_list(self): """Set mask from a list""" mask_list = [False, True, False] a = MaskedColumn(name="a", data=[1, 2, 3], mask=mask_list) assert np.all(a.mask == mask_list) def test_override_existing_mask(self): """Override existing mask values""" mask_list = [False, True, False] b = MaskedColumn(name="b", data=self.b, mask=mask_list) assert np.all(b.mask == mask_list) def test_incomplete_mask_spec(self): """Incomplete mask specification raises MaskError""" mask_list = [False, True] with pytest.raises(ma.MaskError): MaskedColumn(name="b", length=4, mask=mask_list) class TestTableInit(SetupData): """Initializing a table""" @pytest.mark.parametrize("type_str", ("?", "b", "i2", "f4", "c8", "S", "U", "O")) @pytest.mark.parametrize("shape", ((8,), (4, 2), (2, 2, 2))) def test_init_from_sequence_data_numeric_typed(self, type_str, shape): """Test init from list or list of lists with dtype specified, optionally including an np.ma.masked element. """ # Make data of correct dtype and shape, then turn into a list, # then use that to init Table with spec'd type_str. data = list(range(8)) np_data = np.array(data, dtype=type_str).reshape(shape) np_data_list = np_data.tolist() t = Table([np_data_list], dtype=[type_str]) col = t["col0"] assert col.dtype == np_data.dtype assert np.all(col == np_data) assert type(col) is Column # Introduce np.ma.masked in the list input and confirm dtype still OK. 
if len(shape) == 1: np_data_list[-1] = np.ma.masked elif len(shape) == 2: np_data_list[-1][-1] = np.ma.masked else: np_data_list[-1][-1][-1] = np.ma.masked last_idx = tuple(-1 for _ in shape) t = Table([np_data_list], dtype=[type_str]) col = t["col0"] assert col.dtype == np_data.dtype assert np.all(col == np_data) assert col.mask[last_idx] assert type(col) is MaskedColumn @pytest.mark.parametrize("type_str", ("?", "b", "i2", "f4", "c8", "S", "U", "O")) @pytest.mark.parametrize("shape", ((8,), (4, 2), (2, 2, 2))) def test_init_from_sequence_data_numeric_untyped(self, type_str, shape): """Test init from list or list of lists with dtype NOT specified, optionally including an np.ma.masked element. """ data = list(range(8)) np_data = np.array(data, dtype=type_str).reshape(shape) np_data_list = np_data.tolist() t = Table([np_data_list]) # Grab the dtype that numpy assigns for the Python list inputs dtype_expected = t["col0"].dtype # Introduce np.ma.masked in the list input and confirm dtype still OK. if len(shape) == 1: np_data_list[-1] = np.ma.masked elif len(shape) == 2: np_data_list[-1][-1] = np.ma.masked else: np_data_list[-1][-1][-1] = np.ma.masked last_idx = tuple(-1 for _ in shape) t = Table([np_data_list]) col = t["col0"] # Confirm dtype is same as for untype list input w/ no mask assert col.dtype == dtype_expected assert np.all(col == np_data) assert col.mask[last_idx] assert type(col) is MaskedColumn def test_initialization_with_all_columns(self): t1 = Table([self.a, self.b, self.c, self.d, self.ca, self.sc]) assert t1.colnames == ["a", "b", "c", "d", "ca", "sc"] # Check we get the same result by passing in as list of dict. # (Regression test for error uncovered by scintillometry package.) lofd = [{k: row[k] for k in t1.colnames} for row in t1] t2 = Table(lofd) for k in t1.colnames: assert t1[k].dtype == t2[k].dtype assert np.all(t1[k] == t2[k]) in (True, np.ma.masked) assert np.all( getattr(t1[k], "mask", False) == getattr(t2[k], "mask", False) ) def test_mask_false_if_input_mask_not_true(self): """Masking is always False if initial masked arg is not True""" t = Table([self.ca, self.a]) assert t.masked is False # True before astropy 4.0 t = Table([self.ca]) assert t.masked is False t = Table([self.ca, ma.array([1, 2, 3])]) assert t.masked is False # True before astropy 4.0 def test_mask_false_if_no_input_masked(self): """Masking not true if not (requested or input requires mask)""" t0 = Table([[3, 4]], masked=False) t1 = Table(t0, masked=True) t2 = Table(t1, masked=False) assert not t0.masked assert t1.masked assert not t2.masked def test_mask_property(self): t = self.t # Access table mask (boolean structured array) by column name assert np.all(t.mask["a"] == np.array([False, False, False])) assert np.all(t.mask["b"] == np.array([True, True, True])) # Check that setting mask from table mask has the desired effect on column t.mask["b"] = np.array([False, True, False]) assert np.all(t["b"].mask == np.array([False, True, False])) # Non-masked table returns None for mask attribute t2 = Table([self.ca], masked=False) assert t2.mask is None # Set mask property globally and verify local correctness for mask in (True, False): t.mask = mask for name in ("a", "b"): assert np.all(t[name].mask == mask) class TestAddColumn: def test_add_masked_column_to_masked_table(self): t = Table(masked=True) assert t.masked t.add_column(MaskedColumn(name="a", data=[1, 2, 3], mask=[0, 1, 0])) assert t.masked t.add_column(MaskedColumn(name="b", data=[4, 5, 6], mask=[1, 0, 1])) assert t.masked assert 
isinstance(t["a"], MaskedColumn) assert isinstance(t["b"], MaskedColumn) assert np.all(t["a"] == np.array([1, 2, 3])) assert np.all(t["a"].mask == np.array([0, 1, 0], bool)) assert np.all(t["b"] == np.array([4, 5, 6])) assert np.all(t["b"].mask == np.array([1, 0, 1], bool)) def test_add_masked_column_to_non_masked_table(self): t = Table(masked=False) assert not t.masked t.add_column(Column(name="a", data=[1, 2, 3])) assert not t.masked t.add_column(MaskedColumn(name="b", data=[4, 5, 6], mask=[1, 0, 1])) assert not t.masked # Changed in 4.0, table no longer auto-upgrades assert isinstance(t["a"], Column) # Was MaskedColumn before 4.0 assert isinstance(t["b"], MaskedColumn) assert np.all(t["a"] == np.array([1, 2, 3])) assert not hasattr(t["a"], "mask") assert np.all(t["b"] == np.array([4, 5, 6])) assert np.all(t["b"].mask == np.array([1, 0, 1], bool)) def test_add_non_masked_column_to_masked_table(self): t = Table(masked=True) assert t.masked t.add_column(Column(name="a", data=[1, 2, 3])) assert t.masked t.add_column(MaskedColumn(name="b", data=[4, 5, 6], mask=[1, 0, 1])) assert t.masked assert isinstance(t["a"], MaskedColumn) assert isinstance(t["b"], MaskedColumn) assert np.all(t["a"] == np.array([1, 2, 3])) assert np.all(t["a"].mask == np.array([0, 0, 0], bool)) assert np.all(t["b"] == np.array([4, 5, 6])) assert np.all(t["b"].mask == np.array([1, 0, 1], bool)) def test_convert_to_masked_table_only_if_necessary(self): # Do not convert to masked table, if new column has no masked value. # See #1185 for details. t = Table(masked=False) assert not t.masked t.add_column(Column(name="a", data=[1, 2, 3])) assert not t.masked t.add_column(MaskedColumn(name="b", data=[4, 5, 6], mask=[0, 0, 0])) assert not t.masked assert np.all(t["a"] == np.array([1, 2, 3])) assert np.all(t["b"] == np.array([4, 5, 6])) class TestRenameColumn: def test_rename_masked_column(self): t = Table(masked=True) t.add_column(MaskedColumn(name="a", data=[1, 2, 3], mask=[0, 1, 0])) t["a"].fill_value = 42 t.rename_column("a", "b") assert t.masked assert np.all(t["b"] == np.array([1, 2, 3])) assert np.all(t["b"].mask == np.array([0, 1, 0], bool)) assert t["b"].fill_value == 42 assert t.colnames == ["b"] class TestRemoveColumn: def test_remove_masked_column(self): t = Table(masked=True) t.add_column(MaskedColumn(name="a", data=[1, 2, 3], mask=[0, 1, 0])) t["a"].fill_value = 42 t.add_column(MaskedColumn(name="b", data=[4, 5, 6], mask=[1, 0, 1])) t.remove_column("b") assert t.masked assert np.all(t["a"] == np.array([1, 2, 3])) assert np.all(t["a"].mask == np.array([0, 1, 0], bool)) assert t["a"].fill_value == 42 assert t.colnames == ["a"] class TestAddRow: def test_add_masked_row_to_masked_table_iterable(self): t = Table(masked=True) t.add_column(MaskedColumn(name="a", data=[1], mask=[0])) t.add_column(MaskedColumn(name="b", data=[4], mask=[1])) t.add_row([2, 5], mask=[1, 0]) t.add_row([3, 6], mask=[0, 1]) assert t.masked assert np.all(np.array(t["a"]) == np.array([1, 2, 3])) assert np.all(t["a"].mask == np.array([0, 1, 0], bool)) assert np.all(np.array(t["b"]) == np.array([4, 5, 6])) assert np.all(t["b"].mask == np.array([1, 0, 1], bool)) def test_add_masked_row_to_masked_table_mapping1(self): t = Table(masked=True) t.add_column(MaskedColumn(name="a", data=[1], mask=[0])) t.add_column(MaskedColumn(name="b", data=[4], mask=[1])) t.add_row({"b": 5, "a": 2}, mask={"a": 1, "b": 0}) t.add_row({"a": 3, "b": 6}, mask={"b": 1, "a": 0}) assert t.masked assert np.all(np.array(t["a"]) == np.array([1, 2, 3])) assert np.all(t["a"].mask == 
np.array([0, 1, 0], bool)) assert np.all(np.array(t["b"]) == np.array([4, 5, 6])) assert np.all(t["b"].mask == np.array([1, 0, 1], bool)) def test_add_masked_row_to_masked_table_mapping2(self): # When adding values to a masked table, if the mask is specified as a # dict, then values not specified will have mask values set to True t = Table(masked=True) t.add_column(MaskedColumn(name="a", data=[1], mask=[0])) t.add_column(MaskedColumn(name="b", data=[4], mask=[1])) t.add_row({"b": 5}, mask={"b": 0}) t.add_row({"a": 3}, mask={"a": 0}) assert t.masked assert t["a"][0] == 1 and t["a"][2] == 3 assert np.all(t["a"].mask == np.array([0, 1, 0], bool)) assert t["b"][1] == 5 assert np.all(t["b"].mask == np.array([1, 0, 1], bool)) def test_add_masked_row_to_masked_table_mapping3(self): # When adding values to a masked table, if mask is not passed to # add_row, then the mask should be set to False if values are present # and True if not. t = Table(masked=True) t.add_column(MaskedColumn(name="a", data=[1], mask=[0])) t.add_column(MaskedColumn(name="b", data=[4], mask=[1])) t.add_row({"b": 5}) t.add_row({"a": 3}) assert t.masked assert t["a"][0] == 1 and t["a"][2] == 3 assert np.all(t["a"].mask == np.array([0, 1, 0], bool)) assert t["b"][1] == 5 assert np.all(t["b"].mask == np.array([1, 0, 1], bool)) def test_add_masked_row_to_masked_table_mapping4(self): # When adding values to a masked table, if the mask is specified as a # dict, then keys in values should match keys in mask t = Table(masked=True) t.add_column(MaskedColumn(name="a", data=[1], mask=[0])) t.add_column(MaskedColumn(name="b", data=[4], mask=[1])) with pytest.raises(ValueError) as exc: t.add_row({"b": 5}, mask={"a": True}) assert exc.value.args[0] == "keys in mask should match keys in vals" def test_add_masked_row_to_masked_table_mismatch(self): t = Table(masked=True) t.add_column(MaskedColumn(name="a", data=[1], mask=[0])) t.add_column(MaskedColumn(name="b", data=[4], mask=[1])) with pytest.raises(TypeError) as exc: t.add_row([2, 5], mask={"a": 1, "b": 0}) assert exc.value.args[0] == "Mismatch between type of vals and mask" with pytest.raises(TypeError) as exc: t.add_row({"b": 5, "a": 2}, mask=[1, 0]) assert exc.value.args[0] == "Mismatch between type of vals and mask" def test_add_masked_row_to_non_masked_table_iterable(self): t = Table(masked=False) t["a"] = [1] t["b"] = [4] t["c"] = Time([1], format="cxcsec") tm = Time(2, format="cxcsec") assert not t.masked t.add_row([2, 5, tm]) assert not t.masked t.add_row([3, 6, tm], mask=[0, 1, 1]) assert not t.masked assert type(t["a"]) is Column assert type(t["b"]) is MaskedColumn assert type(t["c"]) is Time assert np.all(t["a"] == [1, 2, 3]) assert np.all(t["b"].data == [4, 5, 6]) assert np.all(t["b"].mask == [False, False, True]) assert np.all(t["c"][:2] == Time([1, 2], format="cxcsec")) assert np.all(t["c"].mask == [False, False, True]) def test_add_row_cannot_mask_column_raises_typeerror(self): t = QTable() t["a"] = [1, 2] * u.m t.add_row((3 * u.m,)) # No problem with pytest.raises(ValueError) as exc: t.add_row((3 * u.m,), mask=(True,)) assert exc.value.args[0].splitlines() == [ "Unable to insert row because of exception in column 'a':", "mask was supplied for column 'a' but it does not support masked values", ] def test_setting_from_masked_column(): """Test issue in #2997""" mask_b = np.array([True, True, False, False]) for select in (mask_b, slice(0, 2)): t = Table(masked=True) t["a"] = Column([1, 2, 3, 4]) t["b"] = MaskedColumn([11, 22, 33, 44], mask=mask_b) t["c"] = MaskedColumn([111, 
222, 333, 444], mask=[True, False, True, False]) t["b"][select] = t["c"][select] assert t["b"][1] == t[1]["b"] assert t["b"][0] is np.ma.masked # Original state since t['c'][0] is masked assert t["b"][1] == 222 # New from t['c'] since t['c'][1] is unmasked assert t["b"][2] == 33 assert t["b"][3] == 44 assert np.all( t["b"].mask == t.mask["b"] ) # Avoid t.mask in general, this is for testing mask_before_add = t.mask.copy() t["d"] = np.arange(len(t)) assert np.all(t.mask["b"] == mask_before_add["b"]) def test_coercing_fill_value_type(): """ Test that masked column fill_value is coerced into the correct column type. """ # This is the original example posted on the astropy@scipy mailing list t = Table({"a": ["1"]}, masked=True) t["a"].set_fill_value("0") t2 = Table(t, names=["a"], dtype=[np.int32]) assert isinstance(t2["a"].fill_value, np.int32) # Unit test the same thing. c = MaskedColumn(["1"]) c.set_fill_value("0") c2 = MaskedColumn(c, dtype=np.int32) assert isinstance(c2.fill_value, np.int32) def test_mask_copy(): """Test that the mask is copied when copying a table (issue #7362).""" c = MaskedColumn([1, 2], mask=[False, True]) c2 = MaskedColumn(c, copy=True) c2.mask[0] = True assert np.all(c.mask == [False, True]) assert np.all(c2.mask == [True, True]) def test_masked_as_array_with_mixin(): """Test that as_array() and Table.mask attr work with masked mixin columns""" t = Table() t["a"] = Time([1, 2], format="cxcsec") t["b"] = [3, 4] t["c"] = [5, 6] * u.m # With no mask, the output should be ndarray ta = t.as_array() assert isinstance(ta, np.ndarray) and not isinstance(ta, np.ma.MaskedArray) # With a mask, output is MaskedArray t["a"][1] = np.ma.masked ta = t.as_array() assert isinstance(ta, np.ma.MaskedArray) assert np.all(ta["a"].mask == [False, True]) assert np.isclose(ta["a"][0].cxcsec, 1.0) assert not np.any(ta["b"].mask) assert not np.any(ta["c"].mask) # Check table ``mask`` property tm = t.mask assert np.all(tm["a"] == [False, True]) assert not np.any(tm["b"]) assert not np.any(tm["c"]) def test_masked_column_with_unit_in_qtable(): """Test that adding a MaskedColumn with a unit to QTable creates a MaskedQuantity.""" MaskedQuantity = Masked(u.Quantity) t = QTable() t["a"] = MaskedColumn([1, 2]) assert isinstance(t["a"], MaskedColumn) t["b"] = MaskedColumn([1, 2], unit=u.m) assert isinstance(t["b"], MaskedQuantity) assert not np.any(t["b"].mask) t["c"] = MaskedColumn([1, 2], unit=u.m, mask=[True, False]) assert isinstance(t["c"], MaskedQuantity) assert np.all(t["c"].mask == [True, False]) def test_masked_quantity_in_table(): MaskedQuantity = Masked(u.Quantity) t = Table() t["b"] = MaskedQuantity([1, 2], unit=u.m) assert isinstance(t["b"], MaskedColumn) assert not np.any(t["b"].mask) t["c"] = MaskedQuantity([1, 2], unit=u.m, mask=[True, False]) assert isinstance(t["c"], MaskedColumn) assert np.all(t["c"].mask == [True, False]) def test_masked_column_data_attribute_is_plain_masked_array(): c = MaskedColumn([1, 2], mask=[False, True]) c_data = c.data assert type(c_data) is np.ma.MaskedArray assert type(c_data.data) is np.ndarray def test_mask_slicing_count_array_finalize(): """Check that we don't finalize MaskedColumn too often. Regression test for gh-6721. """ # Create a new BaseColumn class that counts how often # ``__array_finalize__`` is called. class MyBaseColumn(BaseColumn): counter = 0 def __array_finalize__(self, obj): super().__array_finalize__(obj) MyBaseColumn.counter += 1 # Base a new MaskedColumn class on it. 
The normal MaskedColumn # hardcodes the initialization to BaseColumn, so we exchange that. class MyMaskedColumn(MaskedColumn, Column, MyBaseColumn): def __new__(cls, *args, **kwargs): self = super().__new__(cls, *args, **kwargs) self._baseclass = MyBaseColumn return self # Creation really needs 2 finalizations (once for the BaseColumn # call inside ``__new__`` and once when the view as a MaskedColumn # is taken), but since the first is hardcoded, we do not capture it # and thus the count is only 1. c = MyMaskedColumn([1, 2], mask=[False, True]) assert MyBaseColumn.counter == 1 # slicing should need only one ``__array_finalize__`` (used to be 3). c0 = c[:] assert MyBaseColumn.counter == 2 # repr should need none (used to be 2!!) repr(c0) assert MyBaseColumn.counter == 2
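

# Editor's sketch (not part of the original test suite): the core
# fill_value / filled() behavior the tests above verify, using the
# module-level Column/MaskedColumn imports. Values are illustrative.
def _example_fill_value():
    c = MaskedColumn([1, 2, 3], mask=[False, True, False], fill_value=-99)
    f = c.filled()  # filled() drops the mask and returns a plain Column
    assert isinstance(f, Column) and not isinstance(f, MaskedColumn)
    assert f.tolist() == [1, -99, 3]  # the column fill_value applies
    assert c.filled(0).tolist() == [1, 0, 3]  # an explicit arg overrides it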
# Licensed under a 3-clause BSD style license - see LICENSE.rst

"""
Verify item access API in:
https://github.com/astropy/astropy/wiki/Table-item-access-definition
"""
import numpy as np
import pytest


@pytest.mark.usefixtures("table_data")
class BaseTestItems:
    pass


@pytest.mark.usefixtures("table_data")
class TestTableColumnsItems(BaseTestItems):
    def test_by_name(self, table_data):
        """Access TableColumns by name and show that item access returns
        a Column that refers to underlying table data"""
        self.t = table_data.Table(table_data.COLS)
        self.tc = self.t.columns

        assert self.tc["a"].name == "a"
        assert self.tc["a"][1] == 2
        assert self.tc["a"].description == "da"
        assert self.tc["a"].format == "%i"
        assert self.tc["a"].meta == {"ma": 1}
        assert self.tc["a"].unit == "ua"
        assert self.tc["a"].attrs_equal(table_data.COLS[0])
        assert isinstance(self.tc["a"], table_data.Column)

        self.tc["b"][1] = 0
        assert self.t["b"][1] == 0

    def test_by_position(self, table_data):
        """Access TableColumns by position and show that item access returns
        a Column that refers to underlying table data"""
        self.t = table_data.Table(table_data.COLS)
        self.tc = self.t.columns

        assert self.tc[1].name == "b"
        assert np.all(self.tc[1].data == table_data.COLS[1].data)
        assert self.tc[1].description == "db"
        assert self.tc[1].format == "%d"
        assert self.tc[1].meta == {"mb": 1}
        assert self.tc[1].unit == "ub"
        assert self.tc[1].attrs_equal(table_data.COLS[1])
        assert isinstance(self.tc[1], table_data.Column)

        # Column 'c' has the same unit 'ub' in the fixture data
        assert self.tc[2].unit == "ub"

        self.tc[1][1] = 0
        assert self.t["b"][1] == 0

    def test_mult_columns(self, table_data):
        """Access TableColumns with "fancy indexing" and show that the
        returned TableColumns object still references the original data"""
        self.t = table_data.Table(table_data.COLS)
        self.tc = self.t.columns

        tc2 = self.tc["b", "c"]
        assert tc2[1].name == "c"
        assert tc2[1][1] == 8
        assert tc2[0].name == "b"
        assert tc2[0][1] == 5

        tc2["c"][1] = 0
        assert self.tc["c"][1] == 0
        assert self.t["c"][1] == 0

    def test_column_slice(self, table_data):
        """Access TableColumns with slice and show that the returned
        TableColumns object still references the original data"""
        self.t = table_data.Table(table_data.COLS)
        self.tc = self.t.columns

        tc2 = self.tc[1:3]
        assert tc2[1].name == "c"
        assert tc2[1][1] == 8
        assert tc2[0].name == "b"
        assert tc2[0][1] == 5

        tc2["c"][1] = 0
        assert self.tc["c"][1] == 0
        assert self.t["c"][1] == 0


@pytest.mark.usefixtures("table_data")
class TestTableItems(BaseTestItems):
    @pytest.mark.parametrize("idx", [1, np.int64(1), np.array(1)])
    def test_column(self, table_data, idx):
        """Column access returns REFERENCE to data"""
        self.t = table_data.Table(table_data.COLS)
        self.tc = self.t.columns

        a = self.t["a"]
        assert a[idx] == 2
        a[idx] = 0
        assert self.t["a"][idx] == 0

    @pytest.mark.parametrize("idx", [1, np.int64(1), np.array(1)])
    def test_row(self, table_data, idx):
        """Row access returns REFERENCE to data"""
        self.t = table_data.Table(table_data.COLS)
        self.tc = self.t.columns

        row = self.t[idx]
        assert row["a"] == 2
        assert row[idx] == 5
        assert row.columns["a"].attrs_equal(table_data.COLS[0])
        assert row.columns["b"].attrs_equal(table_data.COLS[1])
        assert row.columns["c"].attrs_equal(table_data.COLS[2])

        # Check that setting by col index sets the table and row value
        row[idx] = 0
        assert row[idx] == 0
        assert row["b"] == 0
        assert self.t["b"][idx] == 0
        assert self.t[idx]["b"] == 0

        # Check that setting by col name sets the table and row value
        row["a"] = 0
        assert row[0] == 0
        assert row["a"] == 0
        assert self.t["a"][1] == 0
        assert self.t[1]["a"] == 0

    def test_empty_iterable_item(self, table_data):
        """
        Table item access with [], (), or np.array([]) returns the same table
        with no rows.
        """
        self.t = table_data.Table(table_data.COLS)
        for item in [], (), np.array([]):
            t2 = self.t[item]
            assert not t2
            assert len(t2) == 0
            assert t2["a"].attrs_equal(table_data.COLS[0])
            assert t2["b"].attrs_equal(table_data.COLS[1])
            assert t2["c"].attrs_equal(table_data.COLS[2])

    def test_table_slice(self, table_data):
        """Table slice returns REFERENCE to data"""
        self.t = table_data.Table(table_data.COLS)
        self.tc = self.t.columns

        t2 = self.t[1:3]
        assert np.all(t2["a"] == table_data.DATA["a"][1:3])
        assert t2["a"].attrs_equal(table_data.COLS[0])
        assert t2["b"].attrs_equal(table_data.COLS[1])
        assert t2["c"].attrs_equal(table_data.COLS[2])
        t2["a"][0] = 0
        assert np.all(self.t["a"] == np.array([1, 0, 3]))
        assert t2.masked == self.t.masked
        assert t2._column_class == self.t._column_class
        assert isinstance(t2, table_data.Table)

    def test_fancy_index_slice(self, table_data):
        """Table fancy slice returns COPY of data"""
        self.t = table_data.Table(table_data.COLS)
        self.tc = self.t.columns

        slice = np.array([0, 2])
        t2 = self.t[slice]
        assert np.all(t2["a"] == table_data.DATA["a"][slice])
        assert t2["a"].attrs_equal(table_data.COLS[0])
        assert t2["b"].attrs_equal(table_data.COLS[1])
        assert t2["c"].attrs_equal(table_data.COLS[2])
        t2["a"][0] = 0

        assert np.all(self.t.as_array() == table_data.DATA)
        assert np.any(t2["a"] != table_data.DATA["a"][slice])
        assert t2.masked == self.t.masked
        assert t2._column_class == self.t._column_class
        assert isinstance(t2, table_data.Table)

    def test_list_index_slice(self, table_data):
        """Table list index slice returns COPY of data"""
        self.t = table_data.Table(table_data.COLS)
        self.tc = self.t.columns

        slice = [0, 2]
        t2 = self.t[slice]
        assert np.all(t2["a"] == table_data.DATA["a"][slice])
        assert t2["a"].attrs_equal(table_data.COLS[0])
        assert t2["b"].attrs_equal(table_data.COLS[1])
        assert t2["c"].attrs_equal(table_data.COLS[2])
        t2["a"][0] = 0

        assert np.all(self.t.as_array() == table_data.DATA)
        assert np.any(t2["a"] != table_data.DATA["a"][slice])
        assert t2.masked == self.t.masked
        assert t2._column_class == self.t._column_class
        assert isinstance(t2, table_data.Table)

    def test_select_columns(self, table_data):
        """Select columns returns COPY of data and all column attributes"""
        self.t = table_data.Table(table_data.COLS)
        self.tc = self.t.columns

        # try both lists and tuples
        for columns in (("a", "c"), ["a", "c"]):
            t2 = self.t[columns]
            assert np.all(t2["a"] == table_data.DATA["a"])
            assert np.all(t2["c"] == table_data.DATA["c"])
            assert t2["a"].attrs_equal(table_data.COLS[0])
            assert t2["c"].attrs_equal(table_data.COLS[2])
            t2["a"][0] = 0
            assert np.all(self.t.as_array() == table_data.DATA)
            assert np.any(t2["a"] != table_data.DATA["a"])
            assert t2.masked == self.t.masked
            assert t2._column_class == self.t._column_class

    def test_select_columns_fail(self, table_data):
        """Selecting a column that doesn't exist fails"""
        self.t = table_data.Table(table_data.COLS)
        with pytest.raises(KeyError) as err:
            self.t[["xxxx"]]
        assert "'xxxx'" in str(err.value)

        with pytest.raises(KeyError) as err:
            self.t[["xxxx", "yyyy"]]
        assert "'xxxx'" in str(err.value)

    def test_np_where(self, table_data):
        """Select rows using output of np.where"""
        t = table_data.Table(table_data.COLS)
        # Select last two rows
        rows = np.where(t["a"] > 1.5)
        t2 = t[rows]
        assert np.all(t2["a"] == [2, 3])
        assert np.all(t2["b"] == [5, 6])
        assert isinstance(t2, table_data.Table)

        # Select no rows
        rows = np.where(t["a"] > 100)
        t2 = t[rows]
        assert len(t2) == 0
        assert isinstance(t2, table_data.Table)

    def test_np_integers(self, table_data):
        """
        Select rows using numpy integers.  This is a regression test for a
        py 3.3 failure mode
        """
        t = table_data.Table(table_data.COLS)
        idxs = np.random.randint(len(t), size=2)
        t[idxs[1]]

    def test_select_bad_column(self, table_data):
        """Select column name that does not exist"""
        self.t = table_data.Table(table_data.COLS)
        self.tc = self.t.columns

        with pytest.raises(ValueError):
            self.t["a", 1]
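

# Editor's sketch (not part of the original test suite): the slice-vs-fancy-index
# semantics the tests above pin down. Table is imported locally because this
# module otherwise obtains its Table class through the table_data fixture.
def _example_slice_vs_fancy_index():
    from astropy.table import Table

    t = Table({"a": [1, 2, 3]})
    view = t[0:2]  # slicing returns a REFERENCE to the parent data
    view["a"][0] = 99
    assert t["a"][0] == 99

    copy = t[[0, 2]]  # fancy indexing returns a COPY
    copy["a"][0] = -1
    assert t["a"][0] == 99  # the parent table is unchanged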
# Licensed under a 3-clause BSD style license - see LICENSE.rst from astropy import table from astropy.table import pprint class MyRow(table.Row): def __str__(self): return str(self.as_void()) class MyColumn(table.Column): pass class MyMaskedColumn(table.MaskedColumn): pass class MyTableColumns(table.TableColumns): pass class MyTableFormatter(pprint.TableFormatter): pass class MyTable(table.Table): Row = MyRow Column = MyColumn MaskedColumn = MyMaskedColumn TableColumns = MyTableColumns TableFormatter = MyTableFormatter def test_simple_subclass(): t = MyTable([[1, 2], [3, 4]]) row = t[0] assert isinstance(row, MyRow) assert isinstance(t["col0"], MyColumn) assert isinstance(t.columns, MyTableColumns) assert isinstance(t.formatter, MyTableFormatter) t2 = MyTable(t) row = t2[0] assert isinstance(row, MyRow) assert str(row) == "(1, 3)" t3 = table.Table(t) row = t3[0] assert not isinstance(row, MyRow) assert str(row) != "(1, 3)" t = MyTable([[1, 2], [3, 4]], masked=True) row = t[0] assert isinstance(row, MyRow) assert str(row) == "(1, 3)" assert isinstance(t["col0"], MyMaskedColumn) assert isinstance(t.formatter, MyTableFormatter) class ParamsRow(table.Row): """ Row class that allows access to an arbitrary dict of parameters stored as a dict object in the ``params`` column. """ def __getitem__(self, item): if item not in self.colnames: return super().__getitem__("params")[item] else: return super().__getitem__(item) def keys(self): out = [name for name in self.colnames if name != "params"] params = [key.lower() for key in sorted(self["params"])] return out + params def values(self): return [self[key] for key in self.keys()] class ParamsTable(table.Table): Row = ParamsRow def test_params_table(): t = ParamsTable(names=["a", "b", "params"], dtype=["i", "f", "O"]) t.add_row((1, 2.0, {"x": 1.5, "y": 2.5})) t.add_row((2, 3.0, {"z": "hello", "id": 123123})) assert t["params"][0] == {"x": 1.5, "y": 2.5} assert t[0]["params"] == {"x": 1.5, "y": 2.5} assert t[0]["y"] == 2.5 assert t[1]["id"] == 123123 assert list(t[1].keys()) == ["a", "b", "id", "z"] assert list(t[1].values()) == [2, 3.0, 123123, "hello"]
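

# Editor's sketch (not part of the original test suite): the subclassing hook
# shown above in miniature. A Table subclass advertises custom component
# classes through class attributes, and item access then returns instances of
# those classes. The names here are illustrative.
class _LoudRow(table.Row):
    def __str__(self):
        return str(self.as_void()).upper()


class _LoudTable(table.Table):
    Row = _LoudRow


def _example_subclass_hooks():
    t = _LoudTable([[1], ["x"]])
    assert isinstance(t[0], _LoudRow)
    assert str(t[0]) == "(1, 'X')"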
import pickle

import numpy as np

from astropy.coordinates import SkyCoord
from astropy.table import Column, MaskedColumn, QTable, Table
from astropy.table.table_helpers import simple_table
from astropy.time import Time
from astropy.units import Quantity, deg


def test_pickle_column(protocol):
    c = Column(
        data=[1, 2],
        name="a",
        format="%05d",
        description="col a",
        unit="cm",
        meta={"a": 1},
    )
    # Pass the protocol fixture through so every pickle protocol is exercised
    cs = pickle.dumps(c, protocol=protocol)
    cp = pickle.loads(cs)
    assert np.all(cp == c)
    assert cp.attrs_equal(c)
    assert cp._parent_table is None
    assert repr(c) == repr(cp)


def test_pickle_masked_column(protocol):
    c = MaskedColumn(
        data=[1, 2],
        name="a",
        format="%05d",
        description="col a",
        unit="cm",
        meta={"a": 1},
    )
    c.mask[1] = True
    c.fill_value = -99

    cs = pickle.dumps(c, protocol=protocol)
    cp = pickle.loads(cs)

    assert np.all(cp._data == c._data)
    assert np.all(cp.mask == c.mask)
    assert cp.attrs_equal(c)
    assert cp.fill_value == -99
    assert cp._parent_table is None
    assert repr(c) == repr(cp)


def test_pickle_multidimensional_column(protocol):
    """Regression test for https://github.com/astropy/astropy/issues/4098"""
    a = np.zeros((3, 2))
    c = Column(a, name="a")
    cs = pickle.dumps(c, protocol=protocol)
    cp = pickle.loads(cs)

    assert np.all(c == cp)
    assert c.shape == cp.shape
    assert cp.attrs_equal(c)
    assert repr(c) == repr(cp)


def test_pickle_table(protocol):
    a = Column(
        data=[1, 2],
        name="a",
        format="%05d",
        description="col a",
        unit="cm",
        meta={"a": 1},
    )
    b = Column(
        data=[3.0, 4.0],
        name="b",
        format="%05d",
        description="col b",
        unit="cm",
        meta={"b": 1},
    )

    for table_class in Table, QTable:
        t = table_class([a, b], meta={"a": 1, "b": Quantity(10, unit="s")})
        t["c"] = Quantity([1, 2], unit="m")
        t["d"] = Time(["2001-01-02T12:34:56", "2001-02-03T00:01:02"])
        t["e"] = SkyCoord([125.0, 180.0] * deg, [-45.0, 36.5] * deg)

        ts = pickle.dumps(t, protocol=protocol)
        tp = pickle.loads(ts)

        assert tp.__class__ is table_class
        assert np.all(tp["a"] == t["a"])
        assert np.all(tp["b"] == t["b"])
        # test mixin columns
        assert np.all(tp["c"] == t["c"])
        assert np.all(tp["d"] == t["d"])
        assert np.all(tp["e"].ra == t["e"].ra)
        assert np.all(tp["e"].dec == t["e"].dec)
        assert type(tp["c"]) is type(t["c"])  # nopep8
        assert type(tp["d"]) is type(t["d"])  # nopep8
        assert type(tp["e"]) is type(t["e"])  # nopep8
        assert tp.meta == t.meta
        assert type(tp) is type(t)

        assert isinstance(tp["c"], Quantity if (table_class is QTable) else Column)


def test_pickle_masked_table(protocol):
    a = Column(
        data=[1, 2],
        name="a",
        format="%05d",
        description="col a",
        unit="cm",
        meta={"a": 1},
    )
    b = Column(
        data=[3.0, 4.0],
        name="b",
        format="%05d",
        description="col b",
        unit="cm",
        meta={"b": 1},
    )
    t = Table([a, b], meta={"a": 1}, masked=True)
    t["a"].mask[1] = True
    t["a"].fill_value = -99

    ts = pickle.dumps(t, protocol=protocol)
    tp = pickle.loads(ts)

    for colname in ("a", "b"):
        for attr in ("_data", "mask", "fill_value"):
            # Compare the original table against the round-tripped copy
            # (comparing tp against itself would always pass).
            assert np.all(getattr(t[colname], attr) == getattr(tp[colname], attr))
    assert tp["a"].attrs_equal(t["a"])
    assert tp["b"].attrs_equal(t["b"])
    assert tp.meta == t.meta


def test_pickle_indexed_table(protocol):
    """
    Ensure that any indices that have been added will survive pickling.
    """
    t = simple_table()
    t.add_index("a")
    t.add_index(["a", "b"])
    ts = pickle.dumps(t, protocol=protocol)
    tp = pickle.loads(ts)

    assert len(t.indices) == len(tp.indices)
    for index, indexp in zip(t.indices, tp.indices):
        assert np.all(index.data.data == indexp.data.data)
        assert index.data.data.colnames == indexp.data.data.colnames
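

# Editor's sketch (not part of the original test suite): the basic round trip
# that all of the pickle tests above build on, using the module-level
# pickle / np / simple_table imports.
def _example_pickle_roundtrip():
    t = simple_table()
    t2 = pickle.loads(pickle.dumps(t))
    assert t2.colnames == t.colnames
    assert np.all(t2.as_array() == t.as_array())
    assert t2.meta == t.meta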
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ All of the pytest fixtures used by astropy.table are defined here. `conftest.py` is a "special" module name for pytest that is always imported, but is not looked in for tests, and it is the recommended place to put fixtures that are shared between modules. These fixtures can not be defined in a module by a different name and still be shared between modules. """ import pickle from collections import OrderedDict from copy import deepcopy import numpy as np import pytest from astropy import coordinates, table, time from astropy import units as u from astropy.table import QTable, Table, pprint from astropy.table.table_helpers import ArrayWrapper @pytest.fixture(params=[table.Column, table.MaskedColumn]) def Column(request): # Fixture to run all the Column tests for both an unmasked (ndarray) # and masked (MaskedArray) column. return request.param class MaskedTable(table.Table): def __init__(self, *args, **kwargs): kwargs["masked"] = True table.Table.__init__(self, *args, **kwargs) class MyRow(table.Row): pass class MyColumn(table.Column): pass class MyMaskedColumn(table.MaskedColumn): pass class MyTableColumns(table.TableColumns): pass class MyTableFormatter(pprint.TableFormatter): pass class MyTable(table.Table): Row = MyRow Column = MyColumn MaskedColumn = MyMaskedColumn TableColumns = MyTableColumns TableFormatter = MyTableFormatter # Fixture to run all the Column tests for both an unmasked (ndarray) # and masked (MaskedArray) column. @pytest.fixture(params=["unmasked", "masked", "subclass"]) def table_types(request): class TableTypes: def __init__(self, request): if request.param == "unmasked": self.Table = table.Table self.Column = table.Column elif request.param == "masked": self.Table = MaskedTable self.Column = table.MaskedColumn elif request.param == "subclass": self.Table = MyTable self.Column = MyColumn return TableTypes(request) # Fixture to run all the Column tests for both an unmasked (ndarray) # and masked (MaskedArray) column. @pytest.fixture(params=[False, True]) def table_data(request): class TableData: def __init__(self, request): self.Table = MaskedTable if request.param else table.Table self.Column = table.MaskedColumn if request.param else table.Column self.COLS = [ self.Column( name="a", data=[1, 2, 3], description="da", format="%i", meta={"ma": 1}, unit="ua", ), self.Column( name="b", data=[4, 5, 6], description="db", format="%d", meta={"mb": 1}, unit="ub", ), self.Column( name="c", data=[7, 8, 9], description="dc", format="%f", meta={"mc": 1}, unit="ub", ), ] self.DATA = self.Table(self.COLS) return TableData(request) class SubclassTable(table.Table): pass @pytest.fixture(params=[True, False]) def tableclass(request): return table.Table if request.param else SubclassTable @pytest.fixture(params=list(range(0, pickle.HIGHEST_PROTOCOL + 1))) def protocol(request): """ Fixture to run all the tests for all available pickle protocols. """ return request.param # Fixture to run all tests for both an unmasked (ndarray) and masked # (MaskedArray) column. 
@pytest.fixture(params=[False, True]) def table_type(request): return MaskedTable if request.param else table.Table # Stuff for testing mixin columns MIXIN_COLS = { "quantity": [0, 1, 2, 3] * u.m, "longitude": coordinates.Longitude( [0.0, 1.0, 5.0, 6.0] * u.deg, wrap_angle=180.0 * u.deg ), "latitude": coordinates.Latitude([5.0, 6.0, 10.0, 11.0] * u.deg), "time": time.Time([2000, 2001, 2002, 2003], format="jyear"), "timedelta": time.TimeDelta([1, 2, 3, 4], format="jd"), "skycoord": coordinates.SkyCoord(ra=[0, 1, 2, 3] * u.deg, dec=[0, 1, 2, 3] * u.deg), "sphericalrep": coordinates.SphericalRepresentation( [0, 1, 2, 3] * u.deg, [0, 1, 2, 3] * u.deg, 1 * u.kpc ), "cartesianrep": coordinates.CartesianRepresentation( [0, 1, 2, 3] * u.pc, [4, 5, 6, 7] * u.pc, [9, 8, 8, 6] * u.pc ), "sphericaldiff": coordinates.SphericalCosLatDifferential( [0, 1, 2, 3] * u.mas / u.yr, [0, 1, 2, 3] * u.mas / u.yr, 10 * u.km / u.s ), "arraywrap": ArrayWrapper([0, 1, 2, 3]), "arrayswap": ArrayWrapper(np.arange(4, dtype="i").byteswap().newbyteorder()), "ndarraylil": np.array( [(7, "a"), (8, "b"), (9, "c"), (9, "c")], dtype="<i4,|S1" ).view(table.NdarrayMixin), "ndarraybig": np.array( [(7, "a"), (8, "b"), (9, "c"), (9, "c")], dtype=">i4,|S1" ).view(table.NdarrayMixin), } MIXIN_COLS["earthlocation"] = coordinates.EarthLocation( lon=MIXIN_COLS["longitude"], lat=MIXIN_COLS["latitude"], height=MIXIN_COLS["quantity"], ) MIXIN_COLS["sphericalrepdiff"] = coordinates.SphericalRepresentation( MIXIN_COLS["sphericalrep"], differentials=MIXIN_COLS["sphericaldiff"] ) @pytest.fixture(params=sorted(MIXIN_COLS)) def mixin_cols(request): """ Fixture to return a set of columns for mixin testing which includes an index column 'i', two string cols 'a', 'b' (for joins etc), and one of the available mixin column types. """ cols = OrderedDict() mixin_cols = deepcopy(MIXIN_COLS) cols["i"] = table.Column([0, 1, 2, 3], name="i") cols["a"] = table.Column(["a", "b", "b", "c"], name="a") cols["b"] = table.Column(["b", "c", "a", "d"], name="b") cols["m"] = mixin_cols[request.param] return cols @pytest.fixture(params=[False, True]) def T1(request): T = QTable.read( [ " a b c d", " 2 c 7.0 0", " 2 b 5.0 1", " 2 b 6.0 2", " 2 a 4.0 3", " 0 a 0.0 4", " 1 b 3.0 5", " 1 a 2.0 6", " 1 a 1.0 7", ], format="ascii", ) T["q"] = np.arange(len(T)) * u.m T.meta.update({"ta": 1}) T["c"].meta.update({"a": 1}) T["c"].description = "column c" if request.param: T.add_index("a") return T @pytest.fixture(params=[Table, QTable]) def operation_table_type(request): return request.param
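

# Editor's sketch (not part of the original fixtures): how a test module
# consumes the fixtures defined here. pytest injects each fixture by parameter
# name, so the test needs no imports from this file; the underscore prefix
# keeps this example from being collected.
def _example_fixture_usage(table_types):
    t = table_types.Table({"a": [1, 2]})
    assert isinstance(t["a"], table_types.Column)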
# Licensed under a 3-clause BSD style license - see LICENSE.rst from collections import OrderedDict from contextlib import nullcontext import numpy as np import pytest from astropy import table from astropy import units as u from astropy.coordinates import ( BaseRepresentationOrDifferential, CartesianRepresentation, SkyCoord, SphericalRepresentation, UnitSphericalRepresentation, search_around_3d, ) from astropy.coordinates.earth import EarthLocation from astropy.coordinates.tests.helper import skycoord_equal from astropy.coordinates.tests.test_representation import representation_equal from astropy.table import Column, MaskedColumn, QTable, Table, TableMergeError from astropy.table.operations import _get_out_class, join_distance, join_skycoord from astropy.time import Time, TimeDelta from astropy.units.quantity import Quantity from astropy.utils import metadata from astropy.utils.compat.optional_deps import HAS_SCIPY from astropy.utils.metadata import MergeConflictError def sort_eq(list1, list2): return sorted(list1) == sorted(list2) def check_mask(col, exp_mask): """Check that col.mask == exp_mask""" if hasattr(col, "mask"): # Coerce expected mask into dtype of col.mask. In particular this is # needed for types like EarthLocation where the mask is a structured # array. exp_mask = np.array(exp_mask).astype(col.mask.dtype) out = np.all(col.mask == exp_mask) else: # With no mask the check is OK if all the expected mask values # are False (i.e. no auto-conversion to MaskedQuantity if it was # not required by the join). out = np.all(exp_mask == False) # noqa: E712 return out class TestJoin: def _setup(self, t_cls=Table): lines1 = [ " a b c ", " 0 foo L1", " 1 foo L2", " 1 bar L3", " 2 bar L4", ] lines2 = [ " a b d ", " 1 foo R1", " 1 foo R2", " 2 bar R3", " 4 bar R4", ] self.t1 = t_cls.read(lines1, format="ascii") self.t2 = t_cls.read(lines2, format="ascii") self.t3 = t_cls(self.t2, copy=True) self.t1.meta.update(OrderedDict([("b", [1, 2]), ("c", {"a": 1}), ("d", 1)])) self.t2.meta.update(OrderedDict([("b", [3, 4]), ("c", {"b": 1}), ("a", 1)])) self.t3.meta.update(OrderedDict([("b", 3), ("c", [1, 2]), ("d", 2), ("a", 1)])) self.meta_merge = OrderedDict( [ ("b", [1, 2, 3, 4]), ("c", {"a": 1, "b": 1}), ("d", 1), ("a", 1), ] ) def test_table_meta_merge(self, operation_table_type): self._setup(operation_table_type) out = table.join(self.t1, self.t2, join_type="inner") assert out.meta == self.meta_merge def test_table_meta_merge_conflict(self, operation_table_type): self._setup(operation_table_type) with pytest.warns(metadata.MergeConflictWarning) as w: out = table.join(self.t1, self.t3, join_type="inner") assert len(w) == 3 assert out.meta == self.t3.meta with pytest.warns(metadata.MergeConflictWarning) as w: out = table.join( self.t1, self.t3, join_type="inner", metadata_conflicts="warn" ) assert len(w) == 3 assert out.meta == self.t3.meta out = table.join( self.t1, self.t3, join_type="inner", metadata_conflicts="silent" ) assert out.meta == self.t3.meta with pytest.raises(MergeConflictError): out = table.join( self.t1, self.t3, join_type="inner", metadata_conflicts="error" ) with pytest.raises(ValueError): out = table.join( self.t1, self.t3, join_type="inner", metadata_conflicts="nonsense" ) def test_both_unmasked_inner(self, operation_table_type): self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 # Basic join with default parameters (inner join on common keys) t12 = table.join(t1, t2) assert type(t12) is operation_table_type assert type(t12["a"]) is type(t1["a"]) assert type(t12["b"]) 
is type(t1["b"]) assert type(t12["c"]) is type(t1["c"]) assert type(t12["d"]) is type(t2["d"]) assert t12.masked is False assert sort_eq( t12.pformat(), [ " a b c d ", "--- --- --- ---", " 1 foo L2 R1", " 1 foo L2 R2", " 2 bar L4 R3", ], ) # Table meta merged properly assert t12.meta == self.meta_merge def test_both_unmasked_left_right_outer(self, operation_table_type): if operation_table_type is QTable: pytest.xfail("Quantity columns do not support masking.") self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 # Left join t12 = table.join(t1, t2, join_type="left") assert t12.has_masked_columns is True assert t12.masked is False for name in ("a", "b", "c"): assert type(t12[name]) is Column assert type(t12["d"]) is MaskedColumn assert sort_eq( t12.pformat(), [ " a b c d ", "--- --- --- ---", " 0 foo L1 --", " 1 bar L3 --", " 1 foo L2 R1", " 1 foo L2 R2", " 2 bar L4 R3", ], ) # Right join t12 = table.join(t1, t2, join_type="right") assert t12.has_masked_columns is True assert t12.masked is False assert sort_eq( t12.pformat(), [ " a b c d ", "--- --- --- ---", " 1 foo L2 R1", " 1 foo L2 R2", " 2 bar L4 R3", " 4 bar -- R4", ], ) # Outer join t12 = table.join(t1, t2, join_type="outer") assert t12.has_masked_columns is True assert t12.masked is False assert sort_eq( t12.pformat(), [ " a b c d ", "--- --- --- ---", " 0 foo L1 --", " 1 bar L3 --", " 1 foo L2 R1", " 1 foo L2 R2", " 2 bar L4 R3", " 4 bar -- R4", ], ) # Check that the common keys are 'a', 'b' t12a = table.join(t1, t2, join_type="outer") t12b = table.join(t1, t2, join_type="outer", keys=["a", "b"]) assert np.all(t12a.as_array() == t12b.as_array()) def test_both_unmasked_single_key_inner(self, operation_table_type): self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 # Inner join on 'a' column t12 = table.join(t1, t2, keys="a") assert type(t12) is operation_table_type assert type(t12["a"]) is type(t1["a"]) assert type(t12["b_1"]) is type(t1["b"]) assert type(t12["c"]) is type(t1["c"]) assert type(t12["b_2"]) is type(t2["b"]) assert type(t12["d"]) is type(t2["d"]) assert t12.masked is False assert sort_eq( t12.pformat(), [ " a b_1 c b_2 d ", "--- --- --- --- ---", " 1 foo L2 foo R1", " 1 foo L2 foo R2", " 1 bar L3 foo R1", " 1 bar L3 foo R2", " 2 bar L4 bar R3", ], ) def test_both_unmasked_single_key_left_right_outer(self, operation_table_type): if operation_table_type is QTable: pytest.xfail("Quantity columns do not support masking.") self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 # Left join t12 = table.join(t1, t2, join_type="left", keys="a") assert t12.has_masked_columns is True assert sort_eq( t12.pformat(), [ " a b_1 c b_2 d ", "--- --- --- --- ---", " 0 foo L1 -- --", " 1 foo L2 foo R1", " 1 foo L2 foo R2", " 1 bar L3 foo R1", " 1 bar L3 foo R2", " 2 bar L4 bar R3", ], ) # Right join t12 = table.join(t1, t2, join_type="right", keys="a") assert t12.has_masked_columns is True assert sort_eq( t12.pformat(), [ " a b_1 c b_2 d ", "--- --- --- --- ---", " 1 foo L2 foo R1", " 1 foo L2 foo R2", " 1 bar L3 foo R1", " 1 bar L3 foo R2", " 2 bar L4 bar R3", " 4 -- -- bar R4", ], ) # Outer join t12 = table.join(t1, t2, join_type="outer", keys="a") assert t12.has_masked_columns is True assert sort_eq( t12.pformat(), [ " a b_1 c b_2 d ", "--- --- --- --- ---", " 0 foo L1 -- --", " 1 foo L2 foo R1", " 1 foo L2 foo R2", " 1 bar L3 foo R1", " 1 bar L3 foo R2", " 2 bar L4 bar R3", " 4 -- -- bar R4", ], ) def test_masked_unmasked(self, operation_table_type): if operation_table_type is QTable: pytest.xfail("Quantity 
columns do not support masking.")
        self._setup(operation_table_type)
        t1 = self.t1
        t1m = operation_table_type(self.t1, masked=True)
        t2 = self.t2

        # Result table is never masked
        t1m2 = table.join(t1m, t2, join_type="inner")
        assert t1m2.masked is False

        # Result should match non-masked result
        t12 = table.join(t1, t2)
        assert np.all(t12.as_array() == np.array(t1m2))

        # Mask out some values in left table and make sure they propagate
        t1m["b"].mask[1] = True
        t1m["c"].mask[2] = True
        t1m2 = table.join(t1m, t2, join_type="inner", keys="a")
        assert sort_eq(
            t1m2.pformat(),
            [
                " a  b_1  c  b_2  d ",
                "--- --- --- --- ---",
                "  1  --  L2 foo  R1",
                "  1  --  L2 foo  R2",
                "  1 bar  -- foo  R1",
                "  1 bar  -- foo  R2",
                "  2 bar  L4 bar  R3",
            ],
        )

        t21m = table.join(t2, t1m, join_type="inner", keys="a")
        assert sort_eq(
            t21m.pformat(),
            [
                " a  b_1  d  b_2  c ",
                "--- --- --- --- ---",
                "  1 foo  R2  --  L2",
                "  1 foo  R2 bar  --",
                "  1 foo  R1  --  L2",
                "  1 foo  R1 bar  --",
                "  2 bar  R3 bar  L4",
            ],
        )

    def test_masked_masked(self, operation_table_type):
        self._setup(operation_table_type)
        """Two masked tables"""
        if operation_table_type is QTable:
            pytest.xfail("Quantity columns do not support masking.")
        t1 = self.t1
        t1m = operation_table_type(self.t1, masked=True)
        t2 = self.t2
        t2m = operation_table_type(self.t2, masked=True)

        # Result table is never masked but original column types are preserved
        t1m2m = table.join(t1m, t2m, join_type="inner")
        assert t1m2m.masked is False
        for col in t1m2m.itercols():
            assert type(col) is MaskedColumn

        # Result should match non-masked result
        t12 = table.join(t1, t2)
        assert np.all(t12.as_array() == np.array(t1m2m))

        # Mask out some values in both tables and make sure they propagate
        t1m["b"].mask[1] = True
        t1m["c"].mask[2] = True
        t2m["d"].mask[2] = True
        t1m2m = table.join(t1m, t2m, join_type="inner", keys="a")
        assert sort_eq(
            t1m2m.pformat(),
            [
                " a  b_1  c  b_2  d ",
                "--- --- --- --- ---",
                "  1  --  L2 foo  R1",
                "  1  --  L2 foo  R2",
                "  1 bar  -- foo  R1",
                "  1 bar  -- foo  R2",
                "  2 bar  L4 bar  --",
            ],
        )

    def test_classes(self):
        """Ensure that classes and subclasses get through as expected"""

        class MyCol(Column):
            pass

        class MyMaskedCol(MaskedColumn):
            pass

        t1 = Table()
        t1["a"] = MyCol([1])
        t1["b"] = MyCol([2])
        t1["c"] = MyMaskedCol([3])

        t2 = Table()
        t2["a"] = Column([1, 2])
        t2["d"] = MyCol([3, 4])
        t2["e"] = MyMaskedCol([5, 6])

        t12 = table.join(t1, t2, join_type="inner")
        for name, exp_type in (
            ("a", MyCol),
            ("b", MyCol),
            ("c", MyMaskedCol),
            ("d", MyCol),
            ("e", MyMaskedCol),
        ):
            # The closing parenthesis must follow type(...); asserting on
            # type(t12[name] is exp_type) tests the type of a bool, which
            # is always truthy.
            assert type(t12[name]) is exp_type

        t21 = table.join(t2, t1, join_type="left")
        # Note col 'b' gets upgraded from MyCol to MaskedColumn since it needs to be
        # masked, but col 'c' stays since MyMaskedCol supports masking.
        for name, exp_type in (
            ("a", MyCol),
            ("b", MaskedColumn),
            ("c", MyMaskedCol),
            ("d", MyCol),
            ("e", MyMaskedCol),
        ):
            assert type(t21[name]) is exp_type

    def test_col_rename(self, operation_table_type):
        self._setup(operation_table_type)
        """
        Test auto col renaming when there is a conflict.  Use
        non-default values of uniq_col_name and table_names.
        """
        t1 = self.t1
        t2 = self.t2
        t12 = table.join(
            t1,
            t2,
            uniq_col_name="x_{table_name}_{col_name}_y",
            table_names=["L", "R"],
            keys="a",
        )
        assert t12.colnames == ["a", "x_L_b_y", "c", "x_R_b_y", "d"]

    def test_rename_conflict(self, operation_table_type):
        self._setup(operation_table_type)
        """
        Test that auto-column rename fails because of a conflict
        with an existing column
        """
        t1 = self.t1
        t2 = self.t2
        t1["b_1"] = 1  # Add a new column b_1 that will conflict with auto-rename
        with pytest.raises(TableMergeError):
            table.join(t1, t2, keys="a")

    def test_missing_keys(self, operation_table_type):
        self._setup(operation_table_type)
        """Merge on a key column that doesn't exist"""
        t1 = self.t1
        t2 = self.t2
        with pytest.raises(TableMergeError):
            table.join(t1, t2, keys=["a", "not there"])

    def test_bad_join_type(self, operation_table_type):
        self._setup(operation_table_type)
        """Bad join_type input"""
        t1 = self.t1
        t2 = self.t2
        with pytest.raises(ValueError):
            table.join(t1, t2, join_type="illegal value")

    def test_no_common_keys(self, operation_table_type):
        self._setup(operation_table_type)
        """Merge tables with no common keys"""
        t1 = self.t1
        t2 = self.t2
        del t1["a"]
        del t1["b"]
        del t2["a"]
        del t2["b"]
        with pytest.raises(TableMergeError):
            table.join(t1, t2)

    def test_masked_key_column(self, operation_table_type):
        self._setup(operation_table_type)
        """Merge on a key column that has a masked element"""
        if operation_table_type is QTable:
            pytest.xfail("Quantity columns do not support masking.")
        t1 = self.t1
        t2 = operation_table_type(self.t2, masked=True)
        table.join(t1, t2)  # OK
        t2["a"].mask[0] = True
        with pytest.raises(TableMergeError):
            table.join(t1, t2)

    def test_col_meta_merge(self, operation_table_type):
        self._setup(operation_table_type)
        t1 = self.t1
        t2 = self.t2
        t2.rename_column("d", "c")  # force col conflict and renaming
        meta1 = OrderedDict([("b", [1, 2]), ("c", {"a": 1}), ("d", 1)])
        meta2 = OrderedDict([("b", [3, 4]), ("c", {"b": 1}), ("a", 1)])

        # Key col 'a': conflicting units warn and the last value ('m') wins
        t1["a"].unit = "cm"
        t2["a"].unit = "m"
        # Key col 'b', take first value 't1_b'
        t1["b"].info.description = "t1_b"
        # Key col 'b', take first non-empty value '%6s'
        t2["b"].info.format = "%6s"
        # Key col 'a', should be merged meta
        t1["a"].info.meta = meta1
        t2["a"].info.meta = meta2
        # Key col 'b', should be meta2
        t2["b"].info.meta = meta2

        # All these should pass through
        t1["c"].info.format = "%3s"
        t1["c"].info.description = "t1_c"
        t2["c"].info.format = "%6s"
        t2["c"].info.description = "t2_c"

        if operation_table_type is Table:
            ctx = pytest.warns(
                metadata.MergeConflictWarning,
                match=(
                    r"In merged column 'a' the 'unit' attribute does not match \(cm"
                    r" != m\)"
                ),
            )
        else:
            ctx = nullcontext()

        with ctx:
            t12 = table.join(t1, t2, keys=["a", "b"])

        assert t12["a"].unit == "m"
        assert t12["b"].info.description == "t1_b"
        assert t12["b"].info.format == "%6s"
        assert t12["a"].info.meta == self.meta_merge
        assert t12["b"].info.meta == meta2
        assert t12["c_1"].info.format == "%3s"
        assert t12["c_1"].info.description == "t1_c"
        assert t12["c_2"].info.format == "%6s"
        assert t12["c_2"].info.description == "t2_c"

    def test_join_multidimensional(self, operation_table_type):
        self._setup(operation_table_type)

        # Regression test for #2984, which was an issue where join did not work
        # on multi-dimensional columns.
t1 = operation_table_type() t1["a"] = [1, 2, 3] t1["b"] = np.ones((3, 4)) t2 = operation_table_type() t2["a"] = [1, 2, 3] t2["c"] = [4, 5, 6] t3 = table.join(t1, t2) np.testing.assert_allclose(t3["a"], t1["a"]) np.testing.assert_allclose(t3["b"], t1["b"]) np.testing.assert_allclose(t3["c"], t2["c"]) def test_join_multidimensional_masked(self, operation_table_type): self._setup(operation_table_type) """ Test for outer join with multidimensional columns where masking is required. (Issue #4059). """ if operation_table_type is QTable: pytest.xfail("Quantity columns do not support masking.") a = table.MaskedColumn([1, 2, 3], name="a") a2 = table.Column([1, 3, 4], name="a") b = table.MaskedColumn( [ [1, 2], [3, 4], [5, 6], ], name="b", mask=[ [1, 0], [0, 1], [0, 0], ], ) c = table.Column( [ [1, 1], [2, 2], [3, 3], ], name="c", ) t1 = operation_table_type([a, b]) t2 = operation_table_type([a2, c]) t12 = table.join(t1, t2, join_type="inner") assert np.all( t12["b"].mask == [ [True, False], [False, False], ] ) assert not hasattr(t12["c"], "mask") t12 = table.join(t1, t2, join_type="outer") assert np.all( t12["b"].mask == [ [True, False], [False, True], [False, False], [True, True], ] ) assert np.all( t12["c"].mask == [ [False, False], [True, True], [False, False], [False, False], ] ) def test_mixin_functionality(self, mixin_cols): col = mixin_cols["m"] cls_name = type(col).__name__ len_col = len(col) idx = np.arange(len_col) t1 = table.QTable([idx, col], names=["idx", "m1"]) t2 = table.QTable([idx, col], names=["idx", "m2"]) # Set up join mismatches for different join_type cases t1 = t1[[0, 1, 3]] t2 = t2[[0, 2, 3]] # Test inner join, which works for all mixin_cols out = table.join(t1, t2, join_type="inner") assert len(out) == 2 assert out["m2"].__class__ is col.__class__ assert np.all(out["idx"] == [0, 3]) if cls_name == "SkyCoord": # SkyCoord doesn't support __eq__ so use our own assert skycoord_equal(out["m1"], col[[0, 3]]) assert skycoord_equal(out["m2"], col[[0, 3]]) elif "Repr" in cls_name or "Diff" in cls_name: assert np.all(representation_equal(out["m1"], col[[0, 3]])) assert np.all(representation_equal(out["m2"], col[[0, 3]])) else: assert np.all(out["m1"] == col[[0, 3]]) assert np.all(out["m2"] == col[[0, 3]]) # Check for left, right, outer join which requires masking. Works for # the listed mixins classes. 
if isinstance(col, (Quantity, Time, TimeDelta)): out = table.join(t1, t2, join_type="left") assert len(out) == 3 assert np.all(out["idx"] == [0, 1, 3]) assert np.all(out["m1"] == t1["m1"]) assert np.all(out["m2"] == t2["m2"]) assert check_mask(out["m1"], [False, False, False]) assert check_mask(out["m2"], [False, True, False]) out = table.join(t1, t2, join_type="right") assert len(out) == 3 assert np.all(out["idx"] == [0, 2, 3]) assert np.all(out["m1"] == t1["m1"]) assert np.all(out["m2"] == t2["m2"]) assert check_mask(out["m1"], [False, True, False]) assert check_mask(out["m2"], [False, False, False]) out = table.join(t1, t2, join_type="outer") assert len(out) == 4 assert np.all(out["idx"] == [0, 1, 2, 3]) assert np.all(out["m1"] == col) assert np.all(out["m2"] == col) assert check_mask(out["m1"], [False, False, True, False]) assert check_mask(out["m2"], [False, True, False, False]) else: # Otherwise make sure it fails with the right exception message for join_type in ("outer", "left", "right"): with pytest.raises(NotImplementedError) as err: table.join(t1, t2, join_type=join_type) assert "join requires masking" in str( err.value ) or "join unavailable" in str(err.value) def test_cartesian_join(self, operation_table_type): t1 = Table(rows=[(1, "a"), (2, "b")], names=["a", "b"]) t2 = Table(rows=[(3, "c"), (4, "d")], names=["a", "c"]) t12 = table.join(t1, t2, join_type="cartesian") assert t1.colnames == ["a", "b"] assert t2.colnames == ["a", "c"] assert len(t12) == len(t1) * len(t2) assert str(t12).splitlines() == [ "a_1 b a_2 c ", "--- --- --- ---", " 1 a 3 c", " 1 a 4 d", " 2 b 3 c", " 2 b 4 d", ] with pytest.raises(ValueError, match="cannot supply keys for a cartesian join"): t12 = table.join(t1, t2, join_type="cartesian", keys="a") @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_join_with_join_skycoord_sky(self): sc1 = SkyCoord([0, 1, 1.1, 2], [0, 0, 0, 0], unit="deg") sc2 = SkyCoord([0.5, 1.05, 2.1], [0, 0, 0], unit="deg") t1 = Table([sc1], names=["sc"]) t2 = Table([sc2], names=["sc"]) t12 = table.join(t1, t2, join_funcs={"sc": join_skycoord(0.2 * u.deg)}) exp = [ "sc_id sc_1 sc_2 ", " deg,deg deg,deg ", "----- ------- --------", " 1 1.0,0.0 1.05,0.0", " 1 1.1,0.0 1.05,0.0", " 2 2.0,0.0 2.1,0.0", ] assert str(t12).splitlines() == exp @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.parametrize("distance_func", ["search_around_3d", search_around_3d]) def test_join_with_join_skycoord_3d(self, distance_func): sc1 = SkyCoord([0, 1, 1.1, 2] * u.deg, [0, 0, 0, 0] * u.deg, [1, 1, 2, 1] * u.m) sc2 = SkyCoord([0.5, 1.05, 2.1] * u.deg, [0, 0, 0] * u.deg, [1, 1, 1] * u.m) t1 = Table([sc1], names=["sc"]) t2 = Table([sc2], names=["sc"]) join_func = join_skycoord(np.deg2rad(0.2) * u.m, distance_func=distance_func) t12 = table.join(t1, t2, join_funcs={"sc": join_func}) exp = [ "sc_id sc_1 sc_2 ", " deg,deg,m deg,deg,m ", "----- ----------- ------------", " 1 1.0,0.0,1.0 1.05,0.0,1.0", " 2 2.0,0.0,1.0 2.1,0.0,1.0", ] assert str(t12).splitlines() == exp @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_join_with_join_distance_1d(self): c1 = [0, 1, 1.1, 2] c2 = [0.5, 1.05, 2.1] t1 = Table([c1], names=["col"]) t2 = Table([c2], names=["col"]) join_func = join_distance( 0.2, kdtree_args={"leafsize": 32}, query_args={"p": 2} ) t12 = table.join(t1, t2, join_type="outer", join_funcs={"col": join_func}) exp = [ "col_id col_1 col_2", "------ ----- -----", " 1 1.0 1.05", " 1 1.1 1.05", " 2 2.0 2.1", " 3 0.0 --", " 4 -- 0.5", ] assert str(t12).splitlines() == exp
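# (Comment-only sketch of the join_distance behavior exercised above; values
# are illustrative and the import path is the public astropy.table one:
#     from astropy import table
#     from astropy.table import Table, join_distance
#     t1 = Table({"col": [0, 1, 1.1, 2]})
#     t2 = Table({"col": [0.5, 1.05, 2.1]})
#     t12 = table.join(t1, t2, join_type="outer",
#                      join_funcs={"col": join_distance(0.2)})
# Values within the 0.2 tolerance get a shared integer group id, and the join
# then matches on that synthetic "col_id" column: here 1.0 and 1.1 both fall
# within 0.2 of 1.05, so all three rows share col_id 1.)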
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_join_with_join_distance_1d_multikey(self): from astropy.table.operations import _apply_join_funcs c1 = [0, 1, 1.1, 1.2, 2] id1 = [0, 1, 2, 2, 3] o1 = ["a", "b", "c", "d", "e"] c2 = [0.5, 1.05, 2.1] id2 = [0, 2, 4] o2 = ["z", "y", "x"] t1 = Table([c1, id1, o1], names=["col", "id", "o1"]) t2 = Table([c2, id2, o2], names=["col", "id", "o2"]) join_func = join_distance(0.2) join_funcs = {"col": join_func} t12 = table.join(t1, t2, join_type="outer", join_funcs=join_funcs) exp = [ "col_id col_1 id o1 col_2 o2", "------ ----- --- --- ----- ---", " 1 1.0 1 b -- --", " 1 1.1 2 c 1.05 y", " 1 1.2 2 d 1.05 y", " 2 2.0 3 e -- --", " 2 -- 4 -- 2.1 x", " 3 0.0 0 a -- --", " 4 -- 0 -- 0.5 z", ] assert str(t12).splitlines() == exp left, right, keys = _apply_join_funcs(t1, t2, ("col", "id"), join_funcs) assert keys == ("col_id", "id") @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_join_with_join_distance_1d_quantity(self): c1 = [0, 1, 1.1, 2] * u.m c2 = [500, 1050, 2100] * u.mm t1 = QTable([c1], names=["col"]) t2 = QTable([c2], names=["col"]) join_func = join_distance(20 * u.cm) t12 = table.join(t1, t2, join_funcs={"col": join_func}) exp = [ "col_id col_1 col_2 ", " m mm ", "------ ----- ------", " 1 1.0 1050.0", " 1 1.1 1050.0", " 2 2.0 2100.0", ] assert str(t12).splitlines() == exp # Generate column name conflict t2["col_id"] = [0, 0, 0] t2["col__id"] = [0, 0, 0] t12 = table.join(t1, t2, join_funcs={"col": join_func}) exp = [ "col___id col_1 col_2 col_id col__id", " m mm ", "-------- ----- ------ ------ -------", " 1 1.0 1050.0 0 0", " 1 1.1 1050.0 0 0", " 2 2.0 2100.0 0 0", ] assert str(t12).splitlines() == exp @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_join_with_join_distance_2d(self): c1 = np.array([[0, 1, 1.1, 2], [0, 0, 1, 0]]).transpose() c2 = np.array([[0.5, 1.05, 2.1], [0, 0, 0]]).transpose() t1 = Table([c1], names=["col"]) t2 = Table([c2], names=["col"]) join_func = join_distance( 0.2, kdtree_args={"leafsize": 32}, query_args={"p": 2} ) t12 = table.join(t1, t2, join_type="outer", join_funcs={"col": join_func}) exp = [ "col_id col_1 col_2 ", f'{t12["col_id"].dtype.name} float64[2] float64[2]', # int32 or int64 "------ ---------- -----------", " 1 1.0 .. 0.0 1.05 .. 0.0", " 2 2.0 .. 0.0 2.1 .. 0.0", " 3 0.0 .. 0.0 -- .. --", " 4 1.1 .. 1.0 -- .. --", " 5 -- .. -- 0.5 .. 0.0", ] assert t12.pformat(show_dtype=True) == exp def test_keys_left_right_basic(self): """Test using the keys_left and keys_right args to specify different join keys. This takes the standard test case but renames column 'a' to 'x' and 'y' respectively for tables 1 and 2. 
Then it compares the normal join on 'a' to the new join on 'x' and 'y'.""" self._setup() for join_type in ("inner", "left", "right", "outer"): t1 = self.t1.copy() t2 = self.t2.copy() # Expected is same as joining on 'a' but with names 'x', 'y' instead t12_exp = table.join(t1, t2, keys="a", join_type=join_type) t12_exp.add_column(t12_exp["a"], name="x", index=1) t12_exp.add_column(t12_exp["a"], name="y", index=len(t1.colnames) + 1) del t12_exp["a"] # Different key names t1.rename_column("a", "x") t2.rename_column("a", "y") keys_left_list = ["x"] # Test string key name keys_right_list = [["y"]] # Test list of string key names if join_type == "outer": # Just do this for the outer join (others are the same) keys_left_list.append([t1["x"].tolist()]) # Test list key column keys_right_list.append([t2["y"]]) # Test Column key column for keys_left, keys_right in zip(keys_left_list, keys_right_list): t12 = table.join( t1, t2, keys_left=keys_left, keys_right=keys_right, join_type=join_type, ) assert t12.colnames == t12_exp.colnames for col in t12.values_equal(t12_exp).itercols(): assert np.all(col) assert t12_exp.meta == t12.meta def test_keys_left_right_exceptions(self): """Test exceptions using the keys_left and keys_right args to specify different join keys. """ self._setup() t1 = self.t1 t2 = self.t2 msg = r"left table does not have key column 'z'" with pytest.raises(ValueError, match=msg): table.join(t1, t2, keys_left="z", keys_right=["a"]) msg = r"left table has different length from key \[1, 2\]" with pytest.raises(ValueError, match=msg): table.join(t1, t2, keys_left=[[1, 2]], keys_right=["a"]) msg = r"keys arg must be None if keys_left and keys_right are supplied" with pytest.raises(ValueError, match=msg): table.join(t1, t2, keys_left="z", keys_right=["a"], keys="a") msg = r"keys_left and keys_right args must have same length" with pytest.raises(ValueError, match=msg): table.join(t1, t2, keys_left=["a", "b"], keys_right=["a"]) msg = r"keys_left and keys_right must both be provided" with pytest.raises(ValueError, match=msg): table.join(t1, t2, keys_left=["a", "b"]) msg = r"cannot supply join_funcs arg and keys_left / keys_right" with pytest.raises(ValueError, match=msg): table.join(t1, t2, keys_left=["a"], keys_right=["a"], join_funcs={}) def test_join_structured_column(self): """Regression tests for gh-13271.""" # Two tables with matching names, including a structured column. 
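# (Illustrative aside: each "structured" entry is a numpy record with named
# fields "f" and "i", e.g. (2., 2); the join compares the whole record as the
# key, which is why (2., 2) is the only key present in both tables below.)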
t1 = Table( [ np.array([(1.0, 1), (2.0, 2)], dtype=[("f", "f8"), ("i", "i8")]), ["one", "two"], ], names=["structured", "string"], ) t2 = Table( [ np.array([(2.0, 2), (4.0, 4)], dtype=[("f", "f8"), ("i", "i8")]), ["three", "four"], ], names=["structured", "string"], ) t12 = table.join(t1, t2, ["structured"], join_type="outer") assert t12.pformat() == [ "structured [f, i] string_1 string_2", "----------------- -------- --------", " (1., 1) one --", " (2., 2) two three", " (4., 4) -- four", ] class TestSetdiff: def _setup(self, t_cls=Table): lines1 = [" a b ", " 0 foo ", " 1 foo ", " 1 bar ", " 2 bar "] lines2 = [" a b ", " 0 foo ", " 3 foo ", " 4 bar ", " 2 bar "] lines3 = [ " a b d ", " 0 foo R1", " 8 foo R2", " 1 bar R3", " 4 bar R4", ] self.t1 = t_cls.read(lines1, format="ascii") self.t2 = t_cls.read(lines2, format="ascii") self.t3 = t_cls.read(lines3, format="ascii") def test_default_same_columns(self, operation_table_type): self._setup(operation_table_type) out = table.setdiff(self.t1, self.t2) assert type(out["a"]) is type(self.t1["a"]) assert type(out["b"]) is type(self.t1["b"]) assert out.pformat() == [" a b ", "--- ---", " 1 bar", " 1 foo"] def test_default_same_tables(self, operation_table_type): self._setup(operation_table_type) out = table.setdiff(self.t1, self.t1) assert type(out["a"]) is type(self.t1["a"]) assert type(out["b"]) is type(self.t1["b"]) assert out.pformat() == [ " a b ", "--- ---", ] def test_extra_col_left_table(self, operation_table_type): self._setup(operation_table_type) with pytest.raises(ValueError): table.setdiff(self.t3, self.t1) def test_extra_col_right_table(self, operation_table_type): self._setup(operation_table_type) out = table.setdiff(self.t1, self.t3) assert type(out["a"]) is type(self.t1["a"]) assert type(out["b"]) is type(self.t1["b"]) assert out.pformat() == [ " a b ", "--- ---", " 1 foo", " 2 bar", ] def test_keys(self, operation_table_type): self._setup(operation_table_type) out = table.setdiff(self.t3, self.t1, keys=["a", "b"]) assert type(out["a"]) is type(self.t1["a"]) assert type(out["b"]) is type(self.t1["b"]) assert out.pformat() == [ " a b d ", "--- --- ---", " 4 bar R4", " 8 foo R2", ] def test_missing_key(self, operation_table_type): self._setup(operation_table_type) with pytest.raises(ValueError): table.setdiff(self.t3, self.t1, keys=["a", "d"]) class TestVStack: def _setup(self, t_cls=Table): self.t1 = t_cls.read( [ " a b", " 0. foo", " 1. bar", ], format="ascii", ) self.t2 = t_cls.read( [ " a b c", " 2. pez 4", " 3. sez 5", ], format="ascii", ) self.t3 = t_cls.read( [ " a b", " 4. 7", " 5. 8", " 6. 
9", ], format="ascii", ) self.t4 = t_cls(self.t1, copy=True, masked=t_cls is Table) # The following table has meta-data that conflicts with t1 self.t5 = t_cls(self.t1, copy=True) self.t1.meta.update(OrderedDict([("b", [1, 2]), ("c", {"a": 1}), ("d", 1)])) self.t2.meta.update(OrderedDict([("b", [3, 4]), ("c", {"b": 1}), ("a", 1)])) self.t4.meta.update(OrderedDict([("b", [5, 6]), ("c", {"c": 1}), ("e", 1)])) self.t5.meta.update(OrderedDict([("b", 3), ("c", "k"), ("d", 1)])) self.meta_merge = OrderedDict( [ ("b", [1, 2, 3, 4, 5, 6]), ("c", {"a": 1, "b": 1, "c": 1}), ("d", 1), ("a", 1), ("e", 1), ] ) def test_validate_join_type(self): self._setup() with pytest.raises(TypeError, match="Did you accidentally call vstack"): table.vstack(self.t1, self.t2) def test_stack_rows(self, operation_table_type): self._setup(operation_table_type) t2 = self.t1.copy() t2.meta.clear() out = table.vstack([self.t1, t2[1]]) assert type(out["a"]) is type(self.t1["a"]) assert type(out["b"]) is type(self.t1["b"]) assert out.pformat() == [ " a b ", "--- ---", "0.0 foo", "1.0 bar", "1.0 bar", ] def test_stack_table_column(self, operation_table_type): self._setup(operation_table_type) t2 = self.t1.copy() t2.meta.clear() out = table.vstack([self.t1, t2["a"]]) assert out.masked is False assert out.pformat() == [ " a b ", "--- ---", "0.0 foo", "1.0 bar", "0.0 --", "1.0 --", ] def test_table_meta_merge(self, operation_table_type): self._setup(operation_table_type) out = table.vstack([self.t1, self.t2, self.t4], join_type="inner") assert out.meta == self.meta_merge def test_table_meta_merge_conflict(self, operation_table_type): self._setup(operation_table_type) with pytest.warns(metadata.MergeConflictWarning) as w: out = table.vstack([self.t1, self.t5], join_type="inner") assert len(w) == 2 assert out.meta == self.t5.meta with pytest.warns(metadata.MergeConflictWarning) as w: out = table.vstack( [self.t1, self.t5], join_type="inner", metadata_conflicts="warn" ) assert len(w) == 2 assert out.meta == self.t5.meta out = table.vstack( [self.t1, self.t5], join_type="inner", metadata_conflicts="silent" ) assert out.meta == self.t5.meta with pytest.raises(MergeConflictError): out = table.vstack( [self.t1, self.t5], join_type="inner", metadata_conflicts="error" ) with pytest.raises(ValueError): out = table.vstack( [self.t1, self.t5], join_type="inner", metadata_conflicts="nonsense" ) def test_bad_input_type(self, operation_table_type): self._setup(operation_table_type) with pytest.raises(ValueError): table.vstack([]) with pytest.raises(TypeError): table.vstack(1) with pytest.raises(TypeError): table.vstack([self.t2, 1]) with pytest.raises(ValueError): table.vstack([self.t1, self.t2], join_type="invalid join type") def test_stack_basic_inner(self, operation_table_type): self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 t4 = self.t4 t12 = table.vstack([t1, t2], join_type="inner") assert t12.masked is False assert type(t12) is operation_table_type assert type(t12["a"]) is type(t1["a"]) assert type(t12["b"]) is type(t1["b"]) assert t12.pformat() == [ " a b ", "--- ---", "0.0 foo", "1.0 bar", "2.0 pez", "3.0 sez", ] t124 = table.vstack([t1, t2, t4], join_type="inner") assert type(t124) is operation_table_type assert type(t12["a"]) is type(t1["a"]) assert type(t12["b"]) is type(t1["b"]) assert t124.pformat() == [ " a b ", "--- ---", "0.0 foo", "1.0 bar", "2.0 pez", "3.0 sez", "0.0 foo", "1.0 bar", ] def test_stack_basic_outer(self, operation_table_type): if operation_table_type is QTable: pytest.xfail("Quantity columns do not 
support masking.") self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 t4 = self.t4 t12 = table.vstack([t1, t2], join_type="outer") assert t12.masked is False assert t12.pformat() == [ " a b c ", "--- --- ---", "0.0 foo --", "1.0 bar --", "2.0 pez 4", "3.0 sez 5", ] t124 = table.vstack([t1, t2, t4], join_type="outer") assert t124.masked is False assert t124.pformat() == [ " a b c ", "--- --- ---", "0.0 foo --", "1.0 bar --", "2.0 pez 4", "3.0 sez 5", "0.0 foo --", "1.0 bar --", ] def test_stack_incompatible(self, operation_table_type): self._setup(operation_table_type) with pytest.raises(TableMergeError) as excinfo: table.vstack([self.t1, self.t3], join_type="inner") assert "The 'b' columns have incompatible types: {}".format( [self.t1["b"].dtype.name, self.t3["b"].dtype.name] ) in str(excinfo.value) with pytest.raises(TableMergeError) as excinfo: table.vstack([self.t1, self.t3], join_type="outer") assert "The 'b' columns have incompatible types:" in str(excinfo.value) with pytest.raises(TableMergeError): table.vstack([self.t1, self.t2], join_type="exact") t1_reshape = self.t1.copy() t1_reshape["b"].shape = [2, 1] with pytest.raises(TableMergeError) as excinfo: table.vstack([self.t1, t1_reshape]) assert "have different shape" in str(excinfo.value) def test_vstack_one_masked(self, operation_table_type): if operation_table_type is QTable: pytest.xfail("Quantity columns do not support masking.") self._setup(operation_table_type) t1 = self.t1 t4 = self.t4 t4["b"].mask[1] = True t14 = table.vstack([t1, t4]) assert t14.masked is False assert t14.pformat() == [ " a b ", "--- ---", "0.0 foo", "1.0 bar", "0.0 foo", "1.0 --", ] def test_col_meta_merge_inner(self, operation_table_type): self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 t4 = self.t4 # Key col 'a', should last value ('km') t1["a"].info.unit = "cm" t2["a"].info.unit = "m" t4["a"].info.unit = "km" # Key col 'a' format should take last when all match t1["a"].info.format = "%f" t2["a"].info.format = "%f" t4["a"].info.format = "%f" # Key col 'b', take first value 't1_b' t1["b"].info.description = "t1_b" # Key col 'b', take first non-empty value '%6s' t4["b"].info.format = "%6s" # Key col 'a', should be merged meta t1["a"].info.meta.update( OrderedDict([("b", [1, 2]), ("c", {"a": 1}), ("d", 1)]) ) t2["a"].info.meta.update( OrderedDict([("b", [3, 4]), ("c", {"b": 1}), ("a", 1)]) ) t4["a"].info.meta.update( OrderedDict([("b", [5, 6]), ("c", {"c": 1}), ("e", 1)]) ) # Key col 'b', should be meta2 t2["b"].info.meta.update( OrderedDict([("b", [3, 4]), ("c", {"b": 1}), ("a", 1)]) ) if operation_table_type is Table: ctx = pytest.warns(metadata.MergeConflictWarning) else: ctx = nullcontext() with ctx as warning_lines: out = table.vstack([t1, t2, t4], join_type="inner") if operation_table_type is Table: assert len(warning_lines) == 2 assert ( "In merged column 'a' the 'unit' attribute does not match (cm != m)" in str(warning_lines[0].message) ) assert ( "In merged column 'a' the 'unit' attribute does not match (m != km)" in str(warning_lines[1].message) ) # Check units are suitably ignored for a regular Table assert out.pformat() == [ " a b ", " km ", "-------- ------", "0.000000 foo", "1.000000 bar", "2.000000 pez", "3.000000 sez", "0.000000 foo", "1.000000 bar", ] else: # Check QTable correctly dealt with units. 
assert out.pformat() == [ " a b ", " km ", "-------- ------", "0.000000 foo", "0.000010 bar", "0.002000 pez", "0.003000 sez", "0.000000 foo", "1.000000 bar", ] assert out["a"].info.unit == "km" assert out["a"].info.format == "%f" assert out["b"].info.description == "t1_b" assert out["b"].info.format == "%6s" assert out["a"].info.meta == self.meta_merge assert out["b"].info.meta == OrderedDict( [("b", [3, 4]), ("c", {"b": 1}), ("a", 1)] ) def test_col_meta_merge_outer(self, operation_table_type): if operation_table_type is QTable: pytest.xfail("Quantity columns do not support masking.") self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 t4 = self.t4 # Key col 'a', should take last value ('km') t1["a"].unit = "cm" t2["a"].unit = "m" t4["a"].unit = "km" # Key col 'a' format should take last when all match t1["a"].info.format = "%0d" t2["a"].info.format = "%0d" t4["a"].info.format = "%0d" # Key col 'b', take first value 't1_b' t1["b"].info.description = "t1_b" # Key col 'b', take first non-empty value '%6s' t4["b"].info.format = "%6s" # Key col 'a', should be merged meta t1["a"].info.meta.update( OrderedDict([("b", [1, 2]), ("c", {"a": 1}), ("d", 1)]) ) t2["a"].info.meta.update( OrderedDict([("b", [3, 4]), ("c", {"b": 1}), ("a", 1)]) ) t4["a"].info.meta.update( OrderedDict([("b", [5, 6]), ("c", {"c": 1}), ("e", 1)]) ) # Key col 'b', should be meta2 t2["b"].info.meta.update( OrderedDict([("b", [3, 4]), ("c", {"b": 1}), ("a", 1)]) ) # All these should pass through t2["c"].unit = "m" t2["c"].info.format = "%6s" t2["c"].info.description = "t2_c" with pytest.warns(metadata.MergeConflictWarning) as warning_lines: out = table.vstack([t1, t2, t4], join_type="outer") assert len(warning_lines) == 2 assert ( "In merged column 'a' the 'unit' attribute does not match (cm != m)" in str(warning_lines[0].message) ) assert ( "In merged column 'a' the 'unit' attribute does not match (m != km)" in str(warning_lines[1].message) ) assert out["a"].unit == "km" assert out["a"].info.format == "%0d" assert out["b"].info.description == "t1_b" assert out["b"].info.format == "%6s" assert out["a"].info.meta == self.meta_merge assert out["b"].info.meta == OrderedDict( [("b", [3, 4]), ("c", {"b": 1}), ("a", 1)] ) assert out["c"].info.unit == "m" assert out["c"].info.format == "%6s" assert out["c"].info.description == "t2_c" def test_vstack_one_table(self, operation_table_type): self._setup(operation_table_type) """Regression test for issue #3313""" assert (self.t1 == table.vstack(self.t1)).all() assert (self.t1 == table.vstack([self.t1])).all() def test_mixin_functionality(self, mixin_cols): col = mixin_cols["m"] len_col = len(col) t = table.QTable([col], names=["a"]) cls_name = type(col).__name__ # Vstack works for these classes: if isinstance( col, ( u.Quantity, Time, TimeDelta, SkyCoord, EarthLocation, BaseRepresentationOrDifferential, ), ): out = table.vstack([t, t]) assert len(out) == len_col * 2 if cls_name == "SkyCoord": # Argh, SkyCoord needs __eq__!! assert skycoord_equal(out["a"][len_col:], col) assert skycoord_equal(out["a"][:len_col], col) elif "Repr" in cls_name or "Diff" in cls_name: assert np.all(representation_equal(out["a"][:len_col], col)) assert np.all(representation_equal(out["a"][len_col:], col)) else: assert np.all(out["a"][:len_col] == col) assert np.all(out["a"][len_col:] == col) else: with pytest.raises(NotImplementedError) as err: table.vstack([t, t]) assert "vstack unavailable for mixin column type(s): {}".format( cls_name ) in str(err.value) # Check for outer stack which requires masking.
Time, TimeDelta and Quantity support # this currently. t2 = table.QTable([col], names=["b"]) # different from col name for t if isinstance(col, (Time, TimeDelta, Quantity)): out = table.vstack([t, t2], join_type="outer") assert len(out) == len_col * 2 assert np.all(out["a"][:len_col] == col) assert np.all(out["b"][len_col:] == col) assert check_mask(out["a"], [False] * len_col + [True] * len_col) assert check_mask(out["b"], [True] * len_col + [False] * len_col) # check directly stacking mixin columns: out2 = table.vstack([t, t2["b"]]) assert np.all(out["a"] == out2["a"]) assert np.all(out["b"] == out2["b"]) else: with pytest.raises(NotImplementedError) as err: table.vstack([t, t2], join_type="outer") assert "vstack requires masking" in str( err.value ) or "vstack unavailable" in str(err.value) def test_vstack_different_representation(self): """Test that representations can be mixed together.""" rep1 = CartesianRepresentation([1, 2] * u.km, [3, 4] * u.km, 1 * u.km) rep2 = SphericalRepresentation([0] * u.deg, [0] * u.deg, 10 * u.km) t1 = Table([rep1]) t2 = Table([rep2]) t12 = table.vstack([t1, t2]) expected = CartesianRepresentation( [1, 2, 10] * u.km, [3, 4, 0] * u.km, [1, 1, 0] * u.km ) assert np.all(representation_equal(t12["col0"], expected)) rep3 = UnitSphericalRepresentation([0] * u.deg, [0] * u.deg) t3 = Table([rep3]) with pytest.raises(ValueError, match="representations are inconsistent"): table.vstack([t1, t3]) def test_vstack_structured_column(self): """Regression tests for gh-13271.""" # Two tables with matching names, including a structured column. t1 = Table( [ np.array([(1.0, 1), (2.0, 2)], dtype=[("f", "f8"), ("i", "i8")]), ["one", "two"], ], names=["structured", "string"], ) t2 = Table( [ np.array([(3.0, 3), (4.0, 4)], dtype=[("f", "f8"), ("i", "i8")]), ["three", "four"], ], names=["structured", "string"], ) t12 = table.vstack([t1, t2]) assert t12.pformat() == [ "structured [f, i] string", "----------------- ------", " (1., 1) one", " (2., 2) two", " (3., 3) three", " (4., 4) four", ] # One table without the structured column. t3 = t2[("string",)] t13 = table.vstack([t1, t3]) assert t13.pformat() == [ "structured [f, i] string", "----------------- ------", " (1.0, 1) one", " (2.0, 2) two", " -- three", " -- four", ] class TestDStack: def _setup(self, t_cls=Table): self.t1 = t_cls.read( [ " a b", " 0. foo", " 1. bar", ], format="ascii", ) self.t2 = t_cls.read( [ " a b c", " 2. pez 4", " 3. sez 5", ], format="ascii", ) self.t2["d"] = Time([1, 2], format="cxcsec") self.t3 = t_cls( { "a": [[5.0, 6.0], [4.0, 3.0]], "b": [["foo", "bar"], ["pez", "sez"]], }, names=("a", "b"), ) self.t4 = t_cls(self.t1, copy=True, masked=t_cls is Table) self.t5 = t_cls( { "a": [[4.0, 2.0], [1.0, 6.0]], "b": [["foo", "pez"], ["bar", "sez"]], }, names=("a", "b"), ) self.t6 = t_cls.read( [ " a b c", " 7. pez 2", " 4. sez 6", " 6. foo 3", ], format="ascii", ) def test_validate_join_type(self): self._setup() with pytest.raises(TypeError, match="Did you accidentally call dstack"): table.dstack(self.t1, self.t2) @staticmethod def compare_dstack(tables, out): for ii, tbl in enumerate(tables): for name, out_col in out.columns.items(): if name in tbl.colnames: # Columns always compare equal assert np.all(tbl[name] == out[name][:, ii]) # If input has a mask then output must have same mask if hasattr(tbl[name], "mask"): assert np.all(tbl[name].mask == out[name].mask[:, ii]) # If input has no mask then output might have a mask (if other table # is missing that column). If so then all mask values should be False.
elif hasattr(out[name], "mask"): assert not np.any(out[name].mask[:, ii]) else: # Column missing for this table, out must have a mask with all True. assert np.all(out[name].mask[:, ii]) def test_dstack_table_column(self, operation_table_type): """Stack a table with 3 cols and one column (gets auto-converted to Table).""" self._setup(operation_table_type) t2 = self.t1.copy() out = table.dstack([self.t1, t2["a"]]) self.compare_dstack([self.t1, t2[("a",)]], out) def test_dstack_basic_outer(self, operation_table_type): if operation_table_type is QTable: pytest.xfail("Quantity columns do not support masking.") self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 t4 = self.t4 t4["a"].mask[0] = True # Test for non-masked table t12 = table.dstack([t1, t2], join_type="outer") assert type(t12) is operation_table_type assert type(t12["a"]) is type(t1["a"]) assert type(t12["b"]) is type(t1["b"]) self.compare_dstack([t1, t2], t12) # Test for masked table t124 = table.dstack([t1, t2, t4], join_type="outer") assert type(t124) is operation_table_type assert type(t124["a"]) is type(t4["a"]) assert type(t124["b"]) is type(t4["b"]) self.compare_dstack([t1, t2, t4], t124) def test_dstack_basic_inner(self, operation_table_type): self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 t4 = self.t4 # Test for masked table t124 = table.dstack([t1, t2, t4], join_type="inner") assert type(t124) is operation_table_type assert type(t124["a"]) is type(t4["a"]) assert type(t124["b"]) is type(t4["b"]) self.compare_dstack([t1, t2, t4], t124) def test_dstack_multi_dimension_column(self, operation_table_type): self._setup(operation_table_type) t3 = self.t3 t5 = self.t5 t2 = self.t2 t35 = table.dstack([t3, t5]) assert type(t35) is operation_table_type assert type(t35["a"]) is type(t3["a"]) assert type(t35["b"]) is type(t3["b"]) self.compare_dstack([t3, t5], t35) with pytest.raises(TableMergeError): table.dstack([t2, t3]) def test_dstack_different_length_table(self, operation_table_type): self._setup(operation_table_type) t2 = self.t2 t6 = self.t6 with pytest.raises(ValueError): table.dstack([t2, t6]) def test_dstack_single_table(self): self._setup(Table) out = table.dstack(self.t1) assert np.all(out == self.t1) def test_dstack_representation(self): rep1 = SphericalRepresentation([1, 2] * u.deg, [3, 4] * u.deg, 1 * u.kpc) rep2 = SphericalRepresentation([10, 20] * u.deg, [30, 40] * u.deg, 10 * u.kpc) t1 = Table([rep1]) t2 = Table([rep2]) t12 = table.dstack([t1, t2]) assert np.all(representation_equal(t12["col0"][:, 0], rep1)) assert np.all(representation_equal(t12["col0"][:, 1], rep2)) def test_dstack_skycoord(self): sc1 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg) sc2 = SkyCoord([10, 20] * u.deg, [30, 40] * u.deg) t1 = Table([sc1]) t2 = Table([sc2]) t12 = table.dstack([t1, t2]) assert skycoord_equal(sc1, t12["col0"][:, 0]) assert skycoord_equal(sc2, t12["col0"][:, 1]) def test_dstack_structured_column(self): """Regression tests for gh-13271.""" # Two tables with matching names, including a structured column. t1 = Table( [ np.array([(1.0, 1), (2.0, 2)], dtype=[("f", "f8"), ("i", "i8")]), ["one", "two"], ], names=["structured", "string"], ) t2 = Table( [ np.array([(3.0, 3), (4.0, 4)], dtype=[("f", "f8"), ("i", "i8")]), ["three", "four"], ], names=["structured", "string"], ) t12 = table.dstack([t1, t2]) assert t12.pformat() == [ "structured [f, i] string ", "------------------ ------------", "(1., 1) .. (3., 3) one .. three", "(2., 2) .. (4., 4) two .. four", ] # One table without the structured column. 
t3 = t2[("string",)] t13 = table.dstack([t1, t3]) assert t13.pformat() == [ "structured [f, i] string ", "----------------- ------------", " (1.0, 1) .. -- one .. three", " (2.0, 2) .. -- two .. four", ] class TestHStack: def _setup(self, t_cls=Table): self.t1 = t_cls.read( [ " a b", " 0. foo", " 1. bar", ], format="ascii", ) self.t2 = t_cls.read( [ " a b c", " 2. pez 4", " 3. sez 5", ], format="ascii", ) self.t3 = t_cls.read( [ " d e", " 4. 7", " 5. 8", " 6. 9", ], format="ascii", ) self.t4 = t_cls(self.t1, copy=True, masked=True) self.t4["a"].name = "f" self.t4["b"].name = "g" # The following table has meta-data that conflicts with t1 self.t5 = t_cls(self.t1, copy=True) self.t1.meta.update(OrderedDict([("b", [1, 2]), ("c", {"a": 1}), ("d", 1)])) self.t2.meta.update(OrderedDict([("b", [3, 4]), ("c", {"b": 1}), ("a", 1)])) self.t4.meta.update(OrderedDict([("b", [5, 6]), ("c", {"c": 1}), ("e", 1)])) self.t5.meta.update(OrderedDict([("b", 3), ("c", "k"), ("d", 1)])) self.meta_merge = OrderedDict( [ ("b", [1, 2, 3, 4, 5, 6]), ("c", {"a": 1, "b": 1, "c": 1}), ("d", 1), ("a", 1), ("e", 1), ] ) def test_validate_join_type(self): self._setup() with pytest.raises(TypeError, match="Did you accidentally call hstack"): table.hstack(self.t1, self.t2) def test_stack_same_table(self, operation_table_type): """ From #2995, test that hstack'ing references to the same table has the expected output. """ self._setup(operation_table_type) out = table.hstack([self.t1, self.t1]) assert out.masked is False assert out.pformat() == [ "a_1 b_1 a_2 b_2", "--- --- --- ---", "0.0 foo 0.0 foo", "1.0 bar 1.0 bar", ] def test_stack_rows(self, operation_table_type): self._setup(operation_table_type) out = table.hstack([self.t1[0], self.t2[1]]) assert out.masked is False assert out.pformat() == [ "a_1 b_1 a_2 b_2 c ", "--- --- --- --- ---", "0.0 foo 3.0 sez 5", ] def test_stack_columns(self, operation_table_type): self._setup(operation_table_type) out = table.hstack([self.t1, self.t2["c"]]) assert type(out["a"]) is type(self.t1["a"]) assert type(out["b"]) is type(self.t1["b"]) assert type(out["c"]) is type(self.t2["c"]) assert out.pformat() == [ " a b c ", "--- --- ---", "0.0 foo 4", "1.0 bar 5", ] def test_table_meta_merge(self, operation_table_type): self._setup(operation_table_type) out = table.hstack([self.t1, self.t2, self.t4], join_type="inner") assert out.meta == self.meta_merge def test_table_meta_merge_conflict(self, operation_table_type): self._setup(operation_table_type) with pytest.warns(metadata.MergeConflictWarning) as w: out = table.hstack([self.t1, self.t5], join_type="inner") assert len(w) == 2 assert out.meta == self.t5.meta with pytest.warns(metadata.MergeConflictWarning) as w: out = table.hstack( [self.t1, self.t5], join_type="inner", metadata_conflicts="warn" ) assert len(w) == 2 assert out.meta == self.t5.meta out = table.hstack( [self.t1, self.t5], join_type="inner", metadata_conflicts="silent" ) assert out.meta == self.t5.meta with pytest.raises(MergeConflictError): out = table.hstack( [self.t1, self.t5], join_type="inner", metadata_conflicts="error" ) with pytest.raises(ValueError): out = table.hstack( [self.t1, self.t5], join_type="inner", metadata_conflicts="nonsense" ) def test_bad_input_type(self, operation_table_type): self._setup(operation_table_type) with pytest.raises(ValueError): table.hstack([]) with pytest.raises(TypeError): table.hstack(1) with pytest.raises(TypeError): table.hstack([self.t2, 1]) with pytest.raises(ValueError): table.hstack([self.t1, self.t2], join_type="invalid join 
type") def test_stack_basic(self, operation_table_type): self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 t3 = self.t3 t4 = self.t4 out = table.hstack([t1, t2], join_type="inner") assert out.masked is False assert type(out) is operation_table_type assert type(out["a_1"]) is type(t1["a"]) assert type(out["b_1"]) is type(t1["b"]) assert type(out["a_2"]) is type(t2["a"]) assert type(out["b_2"]) is type(t2["b"]) assert out.pformat() == [ "a_1 b_1 a_2 b_2 c ", "--- --- --- --- ---", "0.0 foo 2.0 pez 4", "1.0 bar 3.0 sez 5", ] # stacking as a list gives same result out_list = table.hstack([t1, t2], join_type="inner") assert out.pformat() == out_list.pformat() out = table.hstack([t1, t2], join_type="outer") assert out.pformat() == out_list.pformat() out = table.hstack([t1, t2, t3, t4], join_type="outer") assert out.masked is False assert out.pformat() == [ "a_1 b_1 a_2 b_2 c d e f g ", "--- --- --- --- --- --- --- --- ---", "0.0 foo 2.0 pez 4 4.0 7 0.0 foo", "1.0 bar 3.0 sez 5 5.0 8 1.0 bar", " -- -- -- -- -- 6.0 9 -- --", ] out = table.hstack([t1, t2, t3, t4], join_type="inner") assert out.masked is False assert out.pformat() == [ "a_1 b_1 a_2 b_2 c d e f g ", "--- --- --- --- --- --- --- --- ---", "0.0 foo 2.0 pez 4 4.0 7 0.0 foo", "1.0 bar 3.0 sez 5 5.0 8 1.0 bar", ] def test_stack_incompatible(self, operation_table_type): self._setup(operation_table_type) # For join_type exact, which will fail here because n_rows # does not match with pytest.raises(TableMergeError): table.hstack([self.t1, self.t3], join_type="exact") def test_hstack_one_masked(self, operation_table_type): if operation_table_type is QTable: pytest.xfail() self._setup(operation_table_type) t1 = self.t1 t2 = operation_table_type(t1, copy=True, masked=True) t2.meta.clear() t2["b"].mask[1] = True out = table.hstack([t1, t2]) assert out.pformat() == [ "a_1 b_1 a_2 b_2", "--- --- --- ---", "0.0 foo 0.0 foo", "1.0 bar 1.0 --", ] def test_table_col_rename(self, operation_table_type): self._setup(operation_table_type) out = table.hstack( [self.t1, self.t2], join_type="inner", uniq_col_name="{table_name}_{col_name}", table_names=("left", "right"), ) assert out.masked is False assert out.pformat() == [ "left_a left_b right_a right_b c ", "------ ------ ------- ------- ---", " 0.0 foo 2.0 pez 4", " 1.0 bar 3.0 sez 5", ] def test_col_meta_merge(self, operation_table_type): self._setup(operation_table_type) t1 = self.t1 t3 = self.t3[:2] t4 = self.t4 # Just set a bunch of meta and make sure it is the same in output meta1 = OrderedDict([("b", [1, 2]), ("c", {"a": 1}), ("d", 1)]) t1["a"].unit = "cm" t1["b"].info.description = "t1_b" t4["f"].info.format = "%6s" t1["b"].info.meta.update(meta1) t3["d"].info.meta.update( OrderedDict([("b", [3, 4]), ("c", {"b": 1}), ("a", 1)]) ) t4["g"].info.meta.update( OrderedDict([("b", [5, 6]), ("c", {"c": 1}), ("e", 1)]) ) t3["e"].info.meta.update( OrderedDict([("b", [3, 4]), ("c", {"b": 1}), ("a", 1)]) ) t3["d"].unit = "m" t3["d"].info.format = "%6s" t3["d"].info.description = "t3_c" out = table.hstack([t1, t3, t4], join_type="exact") for t in [t1, t3, t4]: for name in t.colnames: for attr in ("meta", "unit", "format", "description"): assert getattr(out[name].info, attr) == getattr(t[name].info, attr) # Make sure we got a copy of meta, not ref t1["b"].info.meta["b"] = None assert out["b"].info.meta["b"] == [1, 2] def test_hstack_one_table(self, operation_table_type): self._setup(operation_table_type) """Regression test for issue #3313""" assert (self.t1 == table.hstack(self.t1)).all() assert 
(self.t1 == table.hstack([self.t1])).all() def test_mixin_functionality(self, mixin_cols): col1 = mixin_cols["m"] col2 = col1[2:4] # Shorter version of col1 t1 = table.QTable([col1]) t2 = table.QTable([col2]) cls_name = type(col1).__name__ out = table.hstack([t1, t2], join_type="inner") assert type(out["col0_1"]) is type(out["col0_2"]) assert len(out) == len(col2) # Check that columns are as expected. if cls_name == "SkyCoord": assert skycoord_equal(out["col0_1"], col1[: len(col2)]) assert skycoord_equal(out["col0_2"], col2) elif "Repr" in cls_name or "Diff" in cls_name: assert np.all(representation_equal(out["col0_1"], col1[: len(col2)])) assert np.all(representation_equal(out["col0_2"], col2)) else: assert np.all(out["col0_1"] == col1[: len(col2)]) assert np.all(out["col0_2"] == col2) # Time, TimeDelta and Quantity support masking; the other mixins do not if isinstance(col1, (Time, TimeDelta, Quantity)): out = table.hstack([t1, t2], join_type="outer") assert len(out) == len(t1) assert np.all(out["col0_1"] == col1) assert np.all(out["col0_2"][: len(col2)] == col2) assert check_mask(out["col0_2"], [False, False, True, True]) # check directly stacking mixin columns: out2 = table.hstack([t1, t2["col0"]], join_type="outer") assert np.all(out["col0_1"] == out2["col0_1"]) assert np.all(out["col0_2"] == out2["col0_2"]) else: with pytest.raises(NotImplementedError) as err: table.hstack([t1, t2], join_type="outer") assert "hstack requires masking" in str(err.value) def test_unique(operation_table_type): t = operation_table_type.read( [ " a b c d", " 2 b 7.0 0", " 1 c 3.0 5", " 2 b 6.0 2", " 2 a 4.0 3", " 1 a 1.0 7", " 2 b 5.0 1", " 0 a 0.0 4", " 1 a 2.0 6", " 1 c 3.0 5", ], format="ascii", ) tu = operation_table_type(np.sort(t[:-1])) t_all = table.unique(t) assert sort_eq(t_all.pformat(), tu.pformat()) t_s = t.copy() del t_s["b", "c", "d"] t_all = table.unique(t_s) assert sort_eq( t_all.pformat(), [ " a ", "---", " 0", " 1", " 2", ], ) key1 = "a" t1a = table.unique(t, key1) assert sort_eq( t1a.pformat(), [ " a b c d ", "--- --- --- ---", " 0 a 0.0 4", " 1 c 3.0 5", " 2 b 7.0 0", ], ) t1b = table.unique(t, key1, keep="last") assert sort_eq( t1b.pformat(), [ " a b c d ", "--- --- --- ---", " 0 a 0.0 4", " 1 c 3.0 5", " 2 b 5.0 1", ], ) t1c = table.unique(t, key1, keep="none") assert sort_eq( t1c.pformat(), [ " a b c d ", "--- --- --- ---", " 0 a 0.0 4", ], ) key2 = ["a", "b"] t2a = table.unique(t, key2) assert sort_eq( t2a.pformat(), [ " a b c d ", "--- --- --- ---", " 0 a 0.0 4", " 1 a 1.0 7", " 1 c 3.0 5", " 2 a 4.0 3", " 2 b 7.0 0", ], ) t2b = table.unique(t, key2, keep="last") assert sort_eq( t2b.pformat(), [ " a b c d ", "--- --- --- ---", " 0 a 0.0 4", " 1 a 2.0 6", " 1 c 3.0 5", " 2 a 4.0 3", " 2 b 5.0 1", ], ) t2c = table.unique(t, key2, keep="none") assert sort_eq( t2c.pformat(), [ " a b c d ", "--- --- --- ---", " 0 a 0.0 4", " 2 a 4.0 3", ], ) key2 = ["a", "a"] with pytest.raises(ValueError) as exc: t2a = table.unique(t, key2) assert exc.value.args[0] == "duplicate key names" with pytest.raises(ValueError) as exc: table.unique(t, key2, keep=True) assert exc.value.args[0] == "'keep' should be one of 'first', 'last', 'none'" t1_m = operation_table_type(t1a, masked=True) t1_m["a"].mask[1] = True with pytest.raises(ValueError) as exc: t1_mu = table.unique(t1_m) assert ( exc.value.args[0] == "cannot use columns with masked values as keys; " "remove column 'a' from keys and rerun unique()" ) t1_mu = table.unique(t1_m, silent=True) assert t1_mu.masked is False assert t1_mu.pformat() == [ " a b c d ", "--- --- 
--- ---", " 0 a 0.0 4", " 2 b 7.0 0", " -- c 3.0 5", ] with pytest.raises(ValueError): t1_mu = table.unique(t1_m, silent=True, keys="a") t1_m = operation_table_type(t, masked=True) t1_m["a"].mask[1] = True t1_m["d"].mask[3] = True # Test that multiple masked key columns get removed in the correct # order t1_mu = table.unique(t1_m, keys=["d", "a", "b"], silent=True) assert t1_mu.masked is False assert t1_mu.pformat() == [ " a b c d ", "--- --- --- ---", " 2 a 4.0 --", " 2 b 7.0 0", " -- c 3.0 5", ] def test_vstack_bytes(operation_table_type): """ Test for issue #5617 when vstack'ing bytes columns in Py3. This is really an upstream numpy issue numpy/numpy/#8403. """ t = operation_table_type([[b"a"]], names=["a"]) assert t["a"].itemsize == 1 t2 = table.vstack([t, t]) assert len(t2) == 2 assert t2["a"].itemsize == 1 def test_vstack_unicode(): """ Test for problem related to issue #5617 when vstack'ing *unicode* columns. In this case the character size gets multiplied by 4. """ t = table.Table([["a"]], names=["a"]) assert t["a"].itemsize == 4 # 4-byte / char for U dtype t2 = table.vstack([t, t]) assert len(t2) == 2 assert t2["a"].itemsize == 4 def test_join_mixins_time_quantity(): """ Test for table join using non-ndarray key columns. """ tm1 = Time([2, 1, 2], format="cxcsec") q1 = [2, 1, 1] * u.m idx1 = [1, 2, 3] tm2 = Time([2, 3], format="cxcsec") q2 = [2, 3] * u.m idx2 = [10, 20] t1 = Table([tm1, q1, idx1], names=["tm", "q", "idx"]) t2 = Table([tm2, q2, idx2], names=["tm", "q", "idx"]) # Output: # # <Table length=4> # tm q idx_1 idx_2 # m # object float64 int64 int64 # ------------------ ------- ----- ----- # 0.9999999999969589 1.0 2 -- # 2.00000000000351 1.0 3 -- # 2.00000000000351 2.0 1 10 # 3.000000000000469 3.0 -- 20 t12 = table.join(t1, t2, join_type="outer", keys=["tm", "q"]) # Key cols are lexically sorted assert np.all(t12["tm"] == Time([1, 2, 2, 3], format="cxcsec")) assert np.all(t12["q"] == [1, 1, 2, 3] * u.m) assert np.all(t12["idx_1"] == np.ma.array([2, 3, 1, 0], mask=[0, 0, 0, 1])) assert np.all(t12["idx_2"] == np.ma.array([0, 0, 10, 20], mask=[1, 1, 0, 0])) def test_join_mixins_not_sortable(): """ Test for table join using non-ndarray key columns that are not sortable. 
""" sc = SkyCoord([1, 2], [3, 4], unit="deg,deg") t1 = Table([sc, [1, 2]], names=["sc", "idx1"]) t2 = Table([sc, [10, 20]], names=["sc", "idx2"]) with pytest.raises(TypeError, match="one or more key columns are not sortable"): table.join(t1, t2, keys="sc") def test_join_non_1d_key_column(): c1 = [[1, 2], [3, 4]] c2 = [1, 2] t1 = Table([c1, c2], names=["a", "b"]) t2 = t1.copy() with pytest.raises(ValueError, match="key column 'a' must be 1-d"): table.join(t1, t2, keys="a") def test_argsort_time_column(): """Regression test for #10823.""" times = Time(["2016-01-01", "2018-01-01", "2017-01-01"]) t = Table([times], names=["time"]) i = t.argsort("time") assert np.all(i == times.argsort()) def test_sort_indexed_table(): """Test fix for #9473 and #6545 - and another regression test for #10823.""" t = Table([[1, 3, 2], [6, 4, 5]], names=("a", "b")) t.add_index("a") t.sort("a") assert np.all(t["a"] == [1, 2, 3]) assert np.all(t["b"] == [6, 5, 4]) t.sort("b") assert np.all(t["b"] == [4, 5, 6]) assert np.all(t["a"] == [3, 2, 1]) times = ["2016-01-01", "2018-01-01", "2017-01-01"] tm = Time(times) t2 = Table([tm, [3, 2, 1]], names=["time", "flux"]) t2.sort("flux") assert np.all(t2["flux"] == [1, 2, 3]) t2.sort("time") assert np.all(t2["flux"] == [3, 1, 2]) assert np.all(t2["time"] == tm[[0, 2, 1]]) # Using the table as a TimeSeries implicitly sets the index, so # this test is a bit different from the above. from astropy.timeseries import TimeSeries ts = TimeSeries(time=times) ts["flux"] = [3, 2, 1] ts.sort("flux") assert np.all(ts["flux"] == [1, 2, 3]) ts.sort("time") assert np.all(ts["flux"] == [3, 1, 2]) assert np.all(ts["time"] == tm[[0, 2, 1]]) def test_get_out_class(): c = table.Column([1, 2]) mc = table.MaskedColumn([1, 2]) q = [1, 2] * u.m assert _get_out_class([c, mc]) is mc.__class__ assert _get_out_class([mc, c]) is mc.__class__ assert _get_out_class([c, c]) is c.__class__ assert _get_out_class([c]) is c.__class__ with pytest.raises(ValueError): _get_out_class([c, q]) with pytest.raises(ValueError): _get_out_class([q, c]) def test_masking_required_exception(): """ Test that outer join, hstack and vstack fail for a mixin column which does not support masking. 
""" col = table.NdarrayMixin([0, 1, 2, 3]) t1 = table.QTable([[1, 2, 3, 4], col], names=["a", "b"]) t2 = table.QTable([[1, 2], col[:2]], names=["a", "c"]) with pytest.raises(NotImplementedError) as err: table.vstack([t1, t2], join_type="outer") assert "vstack unavailable" in str(err.value) with pytest.raises(NotImplementedError) as err: table.hstack([t1, t2], join_type="outer") assert "hstack requires masking" in str(err.value) with pytest.raises(NotImplementedError) as err: table.join(t1, t2, join_type="outer") assert "join requires masking" in str(err.value) def test_stack_columns(): c = table.Column([1, 2]) mc = table.MaskedColumn([1, 2]) q = [1, 2] * u.m time = Time(["2001-01-02T12:34:56", "2001-02-03T00:01:02"]) sc = SkyCoord([1, 2], [3, 4], unit="deg") cq = table.Column([11, 22], unit=u.m) t = table.hstack([c, q]) assert t.__class__ is table.QTable assert t.masked is False t = table.hstack([q, c]) assert t.__class__ is table.QTable assert t.masked is False t = table.hstack([mc, q]) assert t.__class__ is table.QTable assert t.masked is False t = table.hstack([c, mc]) assert t.__class__ is table.Table assert t.masked is False t = table.vstack([q, q]) assert t.__class__ is table.QTable t = table.vstack([c, c]) assert t.__class__ is table.Table t = table.hstack([c, time]) assert t.__class__ is table.Table t = table.hstack([c, sc]) assert t.__class__ is table.Table t = table.hstack([q, time, sc]) assert t.__class__ is table.QTable with pytest.raises(ValueError): table.vstack([c, q]) with pytest.raises(ValueError): t = table.vstack([q, cq]) def test_mixin_join_regression(): # This used to trigger a ValueError: # ValueError: NumPy boolean array indexing assignment cannot assign # 6 input values to the 4 output values where the mask is true t1 = QTable() t1["index"] = [1, 2, 3, 4, 5] t1["flux1"] = [2, 3, 2, 1, 1] * u.Jy t1["flux2"] = [2, 3, 2, 1, 1] * u.Jy t2 = QTable() t2["index"] = [3, 4, 5, 6] t2["flux1"] = [2, 1, 1, 3] * u.Jy t2["flux2"] = [2, 1, 1, 3] * u.Jy t12 = table.join(t1, t2, keys=("index", "flux1", "flux2"), join_type="outer") assert len(t12) == 6
# Licensed under a 3-clause BSD style license - see LICENSE.rst import operator import warnings import numpy as np import pytest from numpy.testing import assert_array_equal from astropy import table, time from astropy import units as u from astropy.tests.helper import assert_follows_unicode_guidelines from astropy.utils.tests.test_metadata import MetaBaseTest class TestColumn: def test_subclass(self, Column): c = Column(name="a") assert isinstance(c, np.ndarray) c2 = c * 2 assert isinstance(c2, Column) assert isinstance(c2, np.ndarray) def test_numpy_ops(self, Column): """Show that basic numpy operations with Column behave sensibly""" arr = np.array([1, 2, 3]) c = Column(arr, name="a") for op, test_equal in ( (operator.eq, True), (operator.ne, False), (operator.ge, True), (operator.gt, False), (operator.le, True), (operator.lt, False), ): for eq in (op(c, arr), op(arr, c)): assert np.all(eq) if test_equal else not np.any(eq) assert len(eq) == 3 if Column is table.Column: assert type(eq) == np.ndarray else: assert type(eq) == np.ma.core.MaskedArray assert eq.dtype.str == "|b1" lt = c - 1 < arr assert np.all(lt) def test_numpy_boolean_ufuncs(self, Column): """Show that basic numpy operations with Column behave sensibly""" arr = np.array([1, 2, 3]) c = Column(arr, name="a") for ufunc, test_true in ( (np.isfinite, True), (np.isinf, False), (np.isnan, False), (np.sign, True), (np.signbit, False), ): result = ufunc(c) assert len(result) == len(c) assert np.all(result) if test_true else not np.any(result) if Column is table.Column: assert type(result) == np.ndarray else: assert type(result) == np.ma.core.MaskedArray if ufunc is not np.sign: assert result.dtype.str == "|b1" def test_view(self, Column): c = np.array([1, 2, 3], dtype=np.int64).view(Column) assert repr(c) == f"<{Column.__name__} dtype='int64' length=3>\n1\n2\n3" def test_format(self, Column): """Show that the formatted output from str() works""" from astropy import conf with conf.set_temp("max_lines", 8): c1 = Column(np.arange(2000), name="a", dtype=float, format="%6.2f") assert str(c1).splitlines() == [ " a ", "-------", " 0.00", " 1.00", " ...", "1998.00", "1999.00", "Length = 2000 rows", ] def test_convert_numpy_array(self, Column): d = Column([1, 2, 3], name="a", dtype="i8") np_data = np.array(d) assert np.all(np_data == d) np_data = np.array(d, copy=False) assert np.all(np_data == d) np_data = np.array(d, dtype="i4") assert np.all(np_data == d) def test_convert_unit(self, Column): d = Column([1, 2, 3], name="a", dtype="f8", unit="m") d.convert_unit_to("km") assert np.all(d.data == [0.001, 0.002, 0.003]) def test_array_wrap(self): """Test that the __array_wrap__ method converts a reduction ufunc output that has a different shape into an ndarray view. 
Without this a method call like c.mean() returns a Column array object with length=1.""" # Mean and sum for a 1-d float column c = table.Column(name="a", data=[1.0, 2.0, 3.0]) assert np.allclose(c.mean(), 2.0) assert isinstance(c.mean(), (np.floating, float)) assert np.allclose(c.sum(), 6.0) assert isinstance(c.sum(), (np.floating, float)) # Non-reduction ufunc preserves Column class assert isinstance(np.cos(c), table.Column) # Sum for a 1-d int column c = table.Column(name="a", data=[1, 2, 3]) assert np.allclose(c.sum(), 6) assert isinstance(c.sum(), (np.integer, int)) # Sum for a 2-d int column c = table.Column(name="a", data=[[1, 2, 3], [4, 5, 6]]) assert c.sum() == 21 assert isinstance(c.sum(), (np.integer, int)) assert np.all(c.sum(axis=0) == [5, 7, 9]) assert c.sum(axis=0).shape == (3,) assert isinstance(c.sum(axis=0), np.ndarray) # Sum and mean for a 1-d masked column c = table.MaskedColumn(name="a", data=[1.0, 2.0, 3.0], mask=[0, 0, 1]) assert np.allclose(c.mean(), 1.5) assert isinstance(c.mean(), (np.floating, float)) assert np.allclose(c.sum(), 3.0) assert isinstance(c.sum(), (np.floating, float)) def test_name_none(self, Column): """Can create a column without supplying name, which defaults to None""" c = Column([1, 2]) assert c.name is None assert np.all(c == np.array([1, 2])) def test_quantity_init(self, Column): c = Column(data=np.array([1, 2, 3]) * u.m) assert np.all(c.data == np.array([1, 2, 3])) assert np.all(c.unit == u.m) c = Column(data=np.array([1, 2, 3]) * u.m, unit=u.cm) assert np.all(c.data == np.array([100, 200, 300])) assert np.all(c.unit == u.cm) def test_quantity_comparison(self, Column): # regression test for gh-6532 c = Column([1, 2100, 3], unit="Hz") q = 2 * u.kHz check = c < q assert np.all(check == [True, False, True]) # This already worked, but just in case. check = q >= c assert np.all(check == [True, False, True]) def test_attrs_survive_getitem_after_change(self, Column): """ Test for issue #3023: when calling getitem with a MaskedArray subclass the original object attributes are not copied. """ c1 = Column( [1, 2, 3], name="a", unit="m", format="%i", description="aa", meta={"a": 1} ) c1.name = "b" c1.unit = "km" c1.format = "%d" c1.description = "bb" c1.meta = {"bbb": 2} for item in ( slice(None, None), slice(None, 1), np.array([0, 2]), np.array([False, True, False]), ): c2 = c1[item] assert c2.name == "b" assert c2.unit is u.km assert c2.format == "%d" assert c2.description == "bb" assert c2.meta == {"bbb": 2} # Make sure that calling getitem resulting in a scalar does # not copy attributes. 
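# (Illustrative: c1[1] comes back as a bare numpy scalar, so an attribute
# access such as c1[1].unit raises AttributeError rather than returning 'km',
# which is what the hasattr checks below verify.)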
val = c1[1] for attr in ("name", "unit", "format", "description", "meta"): assert not hasattr(val, attr) def test_to_quantity(self, Column): d = Column([1, 2, 3], name="a", dtype="f8", unit="m") assert np.all(d.quantity == ([1, 2, 3.0] * u.m)) assert np.all(d.quantity.value == ([1, 2, 3.0] * u.m).value) assert np.all(d.quantity == d.to("m")) assert np.all(d.quantity.value == d.to("m").value) np.testing.assert_allclose( d.to(u.km).value, ([0.001, 0.002, 0.003] * u.km).value ) np.testing.assert_allclose( d.to("km").value, ([0.001, 0.002, 0.003] * u.km).value ) np.testing.assert_allclose( d.to(u.MHz, u.equivalencies.spectral()).value, [299.792458, 149.896229, 99.93081933], ) d_nounit = Column([1, 2, 3], name="a", dtype="f8", unit=None) with pytest.raises(u.UnitsError): d_nounit.to(u.km) assert np.all(d_nounit.to(u.dimensionless_unscaled) == np.array([1, 2, 3])) # make sure the correct copy/no copy behavior is happening q = [1, 3, 5] * u.km # to should always make a copy d.to(u.km)[:] = q np.testing.assert_allclose(d, [1, 2, 3]) # explicit copying of the quantity should not change the column d.quantity.copy()[:] = q np.testing.assert_allclose(d, [1, 2, 3]) # but quantity directly is a "view", accessing the underlying column d.quantity[:] = q np.testing.assert_allclose(d, [1000, 3000, 5000]) # view should also work for integers d2 = Column([1, 2, 3], name="a", dtype=int, unit="m") d2.quantity[:] = q np.testing.assert_allclose(d2, [1000, 3000, 5000]) # but it should fail for strings or other non-numeric tables d3 = Column(["arg", "name", "stuff"], name="a", unit="m") with pytest.raises(TypeError): d3.quantity def test_to_funcunit_quantity(self, Column): """ Tests for #8424, check if function-unit can be retrieved from column. """ d = Column([1, 2, 3], name="a", dtype="f8", unit="dex(AA)") assert np.all(d.quantity == ([1, 2, 3] * u.dex(u.AA))) assert np.all(d.quantity.value == ([1, 2, 3] * u.dex(u.AA)).value) assert np.all(d.quantity == d.to("dex(AA)")) assert np.all(d.quantity.value == d.to("dex(AA)").value) # make sure, casting to linear unit works q = [10, 100, 1000] * u.AA np.testing.assert_allclose(d.to(u.AA), q) def test_item_access_type(self, Column): """ Tests for #3095, which forces integer item access to always return a plain ndarray or MaskedArray, even in the case of a multi-dim column. 
""" integer_types = (int, np.int_) for int_type in integer_types: c = Column([[1, 2], [3, 4]]) i0 = int_type(0) i1 = int_type(1) assert np.all(c[i0] == [1, 2]) assert type(c[i0]) == ( np.ma.MaskedArray if hasattr(Column, "mask") else np.ndarray ) assert c[i0].shape == (2,) c01 = c[i0:i1] assert np.all(c01 == [[1, 2]]) assert isinstance(c01, Column) assert c01.shape == (1, 2) c = Column([1, 2]) assert np.all(c[i0] == 1) assert isinstance(c[i0], np.integer) assert c[i0].shape == () c01 = c[i0:i1] assert np.all(c01 == [1]) assert isinstance(c01, Column) assert c01.shape == (1,) def test_insert_basic(self, Column): c = Column( [0, 1, 2], name="a", dtype=int, unit="mJy", format="%i", description="test column", meta={"c": 8, "d": 12}, ) # Basic insert c1 = c.insert(1, 100) assert np.all(c1 == [0, 100, 1, 2]) assert c1.attrs_equal(c) assert type(c) is type(c1) if hasattr(c1, "mask"): assert c1.data.shape == c1.mask.shape c1 = c.insert(-1, 100) assert np.all(c1 == [0, 1, 100, 2]) c1 = c.insert(3, 100) assert np.all(c1 == [0, 1, 2, 100]) c1 = c.insert(-3, 100) assert np.all(c1 == [100, 0, 1, 2]) c1 = c.insert(1, [100, 200, 300]) if hasattr(c1, "mask"): assert c1.data.shape == c1.mask.shape # Out of bounds index with pytest.raises((ValueError, IndexError)): c1 = c.insert(-4, 100) with pytest.raises((ValueError, IndexError)): c1 = c.insert(4, 100) def test_insert_axis(self, Column): """Insert with non-default axis kwarg""" c = Column([[1, 2], [3, 4]]) c1 = c.insert(1, [5, 6], axis=None) assert np.all(c1 == [1, 5, 6, 2, 3, 4]) c1 = c.insert(1, [5, 6], axis=1) assert np.all(c1 == [[1, 5, 2], [3, 6, 4]]) def test_insert_string_expand(self, Column): c = Column(["a", "b"]) c1 = c.insert(0, "abc") assert np.all(c1 == ["abc", "a", "b"]) c = Column(["a", "b"]) c1 = c.insert(0, ["c", "def"]) assert np.all(c1 == ["c", "def", "a", "b"]) def test_insert_string_masked_values(self): c = table.MaskedColumn(["a", "b"]) c1 = c.insert(0, np.ma.masked) assert np.all(c1 == ["", "a", "b"]) assert np.all(c1.mask == [True, False, False]) assert c1.dtype == "U1" c2 = c.insert(1, np.ma.MaskedArray(["ccc", "dd"], mask=[True, False])) assert np.all(c2 == ["a", "ccc", "dd", "b"]) assert np.all(c2.mask == [False, True, False, False]) assert c2.dtype == "U3" def test_insert_string_type_error(self, Column): c = Column([1, 2]) with pytest.raises(ValueError, match="invalid literal for int"): c.insert(0, "string") c = Column(["a", "b"]) with pytest.raises(TypeError, match="string operation on non-string array"): c.insert(0, 1) def test_insert_multidim(self, Column): c = Column([[1, 2], [3, 4]], name="a", dtype=int) # Basic insert c1 = c.insert(1, [100, 200]) assert np.all(c1 == [[1, 2], [100, 200], [3, 4]]) # Broadcast c1 = c.insert(1, 100) assert np.all(c1 == [[1, 2], [100, 100], [3, 4]]) # Wrong shape with pytest.raises(ValueError): c1 = c.insert(1, [100, 200, 300]) def test_insert_object(self, Column): c = Column(["a", 1, None], name="a", dtype=object) # Basic insert c1 = c.insert(1, [100, 200]) assert np.all(c1 == np.array(["a", [100, 200], 1, None], dtype=object)) def test_insert_masked(self): c = table.MaskedColumn( [0, 1, 2], name="a", fill_value=9999, mask=[False, True, False] ) # Basic insert c1 = c.insert(1, 100) assert np.all(c1.data.data == [0, 100, 1, 2]) assert c1.fill_value == 9999 assert np.all(c1.data.mask == [False, False, True, False]) assert type(c) is type(c1) for mask in (False, True): c1 = c.insert(1, 100, mask=mask) assert np.all(c1.data.data == [0, 100, 1, 2]) assert np.all(c1.data.mask == [False, mask, True, 

    def test_masked_multidim_as_list(self):
        data = np.ma.MaskedArray([1, 2], mask=[True, False])
        c = table.MaskedColumn([data])
        assert c.shape == (1, 2)
        assert np.all(c[0].mask == [True, False])

    def test_insert_masked_multidim(self):
        c = table.MaskedColumn([[1, 2], [3, 4]], name="a", dtype=int)

        c1 = c.insert(1, [100, 200], mask=True)
        assert np.all(c1.data.data == [[1, 2], [100, 200], [3, 4]])
        assert np.all(c1.data.mask == [[False, False], [True, True], [False, False]])

        c1 = c.insert(1, [100, 200], mask=[True, False])
        assert np.all(c1.data.data == [[1, 2], [100, 200], [3, 4]])
        assert np.all(c1.data.mask == [[False, False], [True, False], [False, False]])

        with pytest.raises(ValueError):
            c1 = c.insert(1, [100, 200], mask=[True, False, True])

    def test_mask_on_non_masked_table(self):
        """
        Setting a mask on a column of a table that is not masked raises
        AttributeError.
        """
        t = table.Table([[1, 2], [3, 4]], names=("a", "b"), dtype=("i4", "f8"))
        with pytest.raises(AttributeError):
            t["a"].mask = [True, False]


class TestAttrEqual:
    """Bunch of tests originally from ATpy that test the attrs_equal method."""

    def test_5(self, Column):
        c1 = Column(name="a", dtype=int, unit="mJy")
        c2 = Column(name="a", dtype=int, unit="mJy")
        assert c1.attrs_equal(c2)

    def test_6(self, Column):
        c1 = Column(name="a", dtype=int, unit="mJy", format="%i",
                    description="test column", meta={"c": 8, "d": 12})
        c2 = Column(name="a", dtype=int, unit="mJy", format="%i",
                    description="test column", meta={"c": 8, "d": 12})
        assert c1.attrs_equal(c2)

    def test_7(self, Column):
        c1 = Column(name="a", dtype=int, unit="mJy", format="%i",
                    description="test column", meta={"c": 8, "d": 12})
        c2 = Column(name="b", dtype=int, unit="mJy", format="%i",
                    description="test column", meta={"c": 8, "d": 12})
        assert not c1.attrs_equal(c2)

    def test_8(self, Column):
        c1 = Column(name="a", dtype=int, unit="mJy", format="%i",
                    description="test column", meta={"c": 8, "d": 12})
        c2 = Column(name="a", dtype=float, unit="mJy", format="%i",
                    description="test column", meta={"c": 8, "d": 12})
        assert not c1.attrs_equal(c2)

    def test_9(self, Column):
        c1 = Column(name="a", dtype=int, unit="mJy", format="%i",
                    description="test column", meta={"c": 8, "d": 12})
        c2 = Column(name="a", dtype=int, unit="erg.cm-2.s-1.Hz-1", format="%i",
                    description="test column", meta={"c": 8, "d": 12})
        assert not c1.attrs_equal(c2)

    def test_10(self, Column):
        c1 = Column(name="a", dtype=int, unit="mJy", format="%i",
                    description="test column", meta={"c": 8, "d": 12})
        c2 = Column(name="a", dtype=int, unit="mJy", format="%g",
                    description="test column", meta={"c": 8, "d": 12})
        assert not c1.attrs_equal(c2)

    def test_11(self, Column):
        c1 = Column(name="a", dtype=int, unit="mJy", format="%i",
                    description="test column", meta={"c": 8, "d": 12})
        c2 = Column(name="a", dtype=int, unit="mJy", format="%i",
                    description="another test column", meta={"c": 8, "d": 12})
        assert not c1.attrs_equal(c2)

    def test_12(self, Column):
        c1 = Column(name="a", dtype=int, unit="mJy", format="%i",
                    description="test column", meta={"c": 8, "d": 12})
        c2 = Column(name="a", dtype=int, unit="mJy", format="%i",
                    description="test column", meta={"e": 8, "d": 12})
        assert not c1.attrs_equal(c2)

    def test_13(self, Column):
        c1 = Column(name="a", dtype=int, unit="mJy", format="%i",
                    description="test column", meta={"c": 8, "d": 12})
        c2 = Column(name="a", dtype=int, unit="mJy", format="%i",
                    description="test column", meta={"c": 9, "d": 12})
        assert not c1.attrs_equal(c2)

    def test_col_and_masked_col(self):
        c1 = table.Column(name="a", dtype=int, unit="mJy", format="%i",
                          description="test column", meta={"c": 8, "d": 12})
        c2 = table.MaskedColumn(name="a", dtype=int, unit="mJy", format="%i",
                                description="test column", meta={"c": 8, "d": 12})
        assert c1.attrs_equal(c2)
        assert c2.attrs_equal(c1)


# Check that the meta descriptor is working as expected.  The MetaBaseTest class
# takes care of defining all the tests, and we simply have to define the class
# and any minimal set of args to pass.


class TestMetaColumn(MetaBaseTest):
    test_class = table.Column
    args = ()


class TestMetaMaskedColumn(MetaBaseTest):
    test_class = table.MaskedColumn
    args = ()


def test_getitem_metadata_regression():
    """
    Regression test for #1471: MaskedArray does not call __array_finalize__ so
    the meta-data was not getting copied over.  By overloading _update_from we
    are able to work around this bug.
    """
    # Make sure that meta-data gets propagated with __getitem__
    c = table.Column(
        data=[1, 2], name="a", description="b", unit="m", format="%i", meta={"c": 8}
    )
    assert c[1:2].name == "a"
    assert c[1:2].description == "b"
    assert c[1:2].unit == "m"
    assert c[1:2].format == "%i"
    assert c[1:2].meta["c"] == 8

    c = table.MaskedColumn(
        data=[1, 2], name="a", description="b", unit="m", format="%i", meta={"c": 8}
    )
    assert c[1:2].name == "a"
    assert c[1:2].description == "b"
    assert c[1:2].unit == "m"
    assert c[1:2].format == "%i"
    assert c[1:2].meta["c"] == 8

    # As above, but with take() - check the method and the function
    c = table.Column(
        data=[1, 2, 3], name="a", description="b", unit="m", format="%i", meta={"c": 8}
    )
    for subset in [c.take([0, 1]), np.take(c, [0, 1])]:
        assert subset.name == "a"
        assert subset.description == "b"
        assert subset.unit == "m"
        assert subset.format == "%i"
        assert subset.meta["c"] == 8

    # Metadata isn't copied for scalar values
    for subset in [c.take(0), np.take(c, 0)]:
        assert subset == 1
        assert subset.shape == ()
        assert not isinstance(subset, table.Column)

    c = table.MaskedColumn(
        data=[1, 2, 3], name="a", description="b", unit="m", format="%i", meta={"c": 8}
    )
    for subset in [c.take([0, 1]), np.take(c, [0, 1])]:
        assert subset.name == "a"
        assert subset.description == "b"
        assert subset.unit == "m"
        assert subset.format == "%i"
        assert subset.meta["c"] == 8

    # Metadata isn't copied for scalar values
    for subset in [c.take(0), np.take(c, 0)]:
        assert subset == 1
        assert subset.shape == ()
        assert not isinstance(subset, table.MaskedColumn)


def test_unicode_guidelines():
    arr = np.array([1, 2, 3])
    c = table.Column(arr, name="a")
    assert_follows_unicode_guidelines(c)


def test_scalar_column():
    """
    Column is not designed to hold scalars, but for numpy 1.6 this can happen:

      >> type(np.std(table.Column([1, 2])))
      astropy.table.column.Column
    """
    c = table.Column(1.5)
    assert repr(c) == "1.5"
    assert str(c) == "1.5"


def test_qtable_column_conversion():
    """
    Ensures that a QTable that gets assigned a unit switches to be Quantity-y.
    """
    qtab = table.QTable([[1, 2], [3, 4.2]], names=["i", "f"])

    assert isinstance(qtab["i"], table.column.Column)
    assert isinstance(qtab["f"], table.column.Column)

    qtab["i"].unit = "km/s"
    assert isinstance(qtab["i"], u.Quantity)
    assert isinstance(qtab["f"], table.column.Column)

    # should follow from the above, but good to make sure as a #4497 regression test
    assert isinstance(qtab["i"][0], u.Quantity)
    assert isinstance(qtab[0]["i"], u.Quantity)
    assert not isinstance(qtab["f"][0], u.Quantity)
    assert not isinstance(qtab[0]["f"], u.Quantity)

    # Regression test for #5342: if a function unit is assigned, the column
    # should become the appropriate FunctionQuantity subclass.
    qtab["f"].unit = u.dex(u.cm / u.s**2)
    assert isinstance(qtab["f"], u.Dex)


@pytest.mark.parametrize("masked", [True, False])
def test_string_truncation_warning(masked):
    """
    Test warnings associated with in-place assignment to a string
    column that results in truncation of the right hand side.
    """
    from inspect import currentframe, getframeinfo

    t = table.Table([["aa", "bb"]], names=["a"], masked=masked)
    t["a"][1] = "cc"
    t["a"][:] = "dd"

    with pytest.warns(
        table.StringTruncateWarning,
        match=r"truncated right side string\(s\) longer than 2 character\(s\)",
    ) as w:
        frameinfo = getframeinfo(currentframe())
        t["a"][0] = "eee"  # replace item with string that gets truncated
    assert t["a"][0] == "ee"
    assert len(w) == 1

    # Make sure the warning points back to the user code line
    assert w[0].lineno == frameinfo.lineno + 1
    assert "test_column" in w[0].filename

    with pytest.warns(
        table.StringTruncateWarning,
        match=r"truncated right side string\(s\) longer than 2 character\(s\)",
    ) as w:
        t["a"][:] = ["ff", "ggg"]  # replace item with string that gets truncated
    assert np.all(t["a"] == ["ff", "gg"])
    assert len(w) == 1

    # Test the obscure case of assigning from an array that was originally
    # wider than any of the current elements (i.e. dtype is U4 but actual
    # elements are U1 at the time of assignment).
    val = np.array(["ffff", "gggg"])
    val[:] = ["f", "g"]
    t["a"][:] = val
    assert np.all(t["a"] == ["f", "g"])


def test_string_truncation_warning_masked():
    """
    Test warnings associated with in-place assignment of a string to a
    masked column, specifically where the right hand side contains
    np.ma.masked.
    """
    # Test for strings, but also cover assignment of np.ma.masked to
    # int and float masked column setting.  This was previously only
    # covered in an unrelated io.ascii test (test_line_endings) which
    # showed an unexpected difference between handling of str and numeric
    # masked arrays.
    for values in (["a", "b"], [1, 2], [1.0, 2.0]):
        mc = table.MaskedColumn(values)

        mc[1] = np.ma.masked
        assert np.all(mc.mask == [False, True])

        mc[:] = np.ma.masked
        assert np.all(mc.mask == [True, True])

    mc = table.MaskedColumn(["aa", "bb"])

    with pytest.warns(
        table.StringTruncateWarning,
        match=r"truncated right side string\(s\) longer than 2 character\(s\)",
    ) as w:
        mc[:] = [np.ma.masked, "ggg"]  # replace item with string that gets truncated
    assert mc[1] == "gg"
    assert np.all(mc.mask == [True, False])
    assert len(w) == 1


@pytest.mark.parametrize("Column", (table.Column, table.MaskedColumn))
def test_col_unicode_sandwich_create_from_str(Column):
    """
    Create a bytestring Column from strings (including unicode) in Py3.
    """
    # a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding.
    # Stress the system by injecting non-ASCII characters.
    uba = "bä"
    c = Column([uba, "def"], dtype="S")
    assert c.dtype.char == "S"
    assert c[0] == uba
    assert isinstance(c[0], str)
    assert isinstance(c[:0], table.Column)
    assert np.all(c[:2] == np.array([uba, "def"]))


@pytest.mark.parametrize("Column", (table.Column, table.MaskedColumn))
def test_col_unicode_sandwich_bytes_obj(Column):
    """
    Create a Column of dtype object with a bytestring in it and make sure it
    keeps the bytestring and does not convert it to str when accessed.
""" c = Column([None, b"def"]) assert c.dtype.char == "O" assert not c[0] assert c[1] == b"def" assert isinstance(c[1], bytes) assert not isinstance(c[1], str) assert isinstance(c[:0], table.Column) assert np.all(c[:2] == np.array([None, b"def"])) assert not np.all(c[:2] == np.array([None, "def"])) @pytest.mark.parametrize("Column", (table.Column, table.MaskedColumn)) def test_col_unicode_sandwich_bytes(Column): """ Create a bytestring Column from bytes and ensure that it works in Python 3 in a convenient way like in Python 2. """ # a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding. # Stress the system by injecting non-ASCII characters. uba = "bä" uba8 = uba.encode("utf-8") c = Column([uba8, b"def"]) assert c.dtype.char == "S" assert c[0] == uba assert isinstance(c[0], str) assert isinstance(c[:0], table.Column) assert np.all(c[:2] == np.array([uba, "def"])) assert isinstance(c[:], table.Column) assert c[:].dtype.char == "S" # Array / list comparisons assert np.all(c == [uba, "def"]) ok = c == [uba8, b"def"] assert type(ok) is type(c.data) assert ok.dtype.char == "?" assert np.all(ok) assert np.all(c == np.array([uba, "def"])) assert np.all(c == np.array([uba8, b"def"])) # Scalar compare cmps = (uba, uba8) for cmp in cmps: ok = c == cmp assert type(ok) is type(c.data) assert np.all(ok == [True, False]) def test_col_unicode_sandwich_unicode(): """ Sanity check that Unicode Column behaves normally. """ uba = "bä" uba8 = uba.encode("utf-8") c = table.Column([uba, "def"], dtype="U") assert c[0] == uba assert isinstance(c[:0], table.Column) assert isinstance(c[0], str) assert np.all(c[:2] == np.array([uba, "def"])) assert isinstance(c[:], table.Column) assert c[:].dtype.char == "U" ok = c == [uba, "def"] assert type(ok) == np.ndarray assert ok.dtype.char == "?" assert np.all(ok) with warnings.catch_warnings(): # Ignore the FutureWarning in numpy >=1.24 (it is OK). warnings.filterwarnings("ignore", message=".*elementwise comparison failed.*") assert np.all(c != [uba8, b"def"]) def test_masked_col_unicode_sandwich(): """ Create a bytestring MaskedColumn and ensure that it works in Python 3 in a convenient way like in Python 2. 
""" c = table.MaskedColumn([b"abc", b"def"]) c[1] = np.ma.masked assert isinstance(c[:0], table.MaskedColumn) assert isinstance(c[0], str) assert c[0] == "abc" assert c[1] is np.ma.masked assert isinstance(c[:], table.MaskedColumn) assert c[:].dtype.char == "S" ok = c == ["abc", "def"] assert ok[0] assert ok[1] is np.ma.masked assert np.all(c == [b"abc", b"def"]) assert np.all(c == np.array(["abc", "def"])) assert np.all(c == np.array([b"abc", b"def"])) for cmp in ("abc", b"abc"): ok = c == cmp assert type(ok) is np.ma.MaskedArray assert ok[0] assert ok[1] is np.ma.masked @pytest.mark.parametrize("Column", (table.Column, table.MaskedColumn)) def test_unicode_sandwich_set(Column): """ Test setting """ uba = "bä" c = Column([b"abc", b"def"]) c[0] = b"aa" assert np.all(c == ["aa", "def"]) c[ 0 ] = uba # a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding assert np.all(c == [uba, "def"]) assert c.pformat() == ["None", "----", " " + uba, " def"] c[:] = b"cc" assert np.all(c == ["cc", "cc"]) c[:] = uba assert np.all(c == [uba, uba]) c[:] = "" c[:] = [uba, b"def"] assert np.all(c == [uba, b"def"]) @pytest.mark.parametrize("class1", [table.MaskedColumn, table.Column]) @pytest.mark.parametrize("class2", [table.MaskedColumn, table.Column, str, list]) def test_unicode_sandwich_compare(class1, class2): """Test that comparing a bytestring Column/MaskedColumn with various str (unicode) object types gives the expected result. Tests #6838. """ obj1 = class1([b"a", b"c"]) if class2 is str: obj2 = "a" elif class2 is list: obj2 = ["a", "b"] else: obj2 = class2(["a", "b"]) assert np.all((obj1 == obj2) == [True, False]) assert np.all((obj2 == obj1) == [True, False]) assert np.all((obj1 != obj2) == [False, True]) assert np.all((obj2 != obj1) == [False, True]) assert np.all((obj1 > obj2) == [False, True]) assert np.all((obj2 > obj1) == [False, False]) assert np.all((obj1 <= obj2) == [True, False]) assert np.all((obj2 <= obj1) == [True, True]) assert np.all((obj1 < obj2) == [False, False]) assert np.all((obj2 < obj1) == [False, True]) assert np.all((obj1 >= obj2) == [True, True]) assert np.all((obj2 >= obj1) == [True, False]) def test_unicode_sandwich_masked_compare(): """Test the fix for #6839 from #6899.""" c1 = table.MaskedColumn(["a", "b", "c", "d"], mask=[True, False, True, False]) c2 = table.MaskedColumn([b"a", b"b", b"c", b"d"], mask=[True, True, False, False]) for cmp in ((c1 == c2), (c2 == c1)): assert cmp[0] is np.ma.masked assert cmp[1] is np.ma.masked assert cmp[2] is np.ma.masked assert cmp[3] for cmp in ((c1 != c2), (c2 != c1)): assert cmp[0] is np.ma.masked assert cmp[1] is np.ma.masked assert cmp[2] is np.ma.masked assert not cmp[3] # Note: comparisons <, >, >=, <= fail to return a masked array entirely, # see https://github.com/numpy/numpy/issues/10092. 


def test_structured_masked_column_roundtrip():
    mc = table.MaskedColumn(
        [(1.0, 2.0), (3.0, 4.0)], mask=[(False, False), (False, False)], dtype="f8,f8"
    )
    assert len(mc.dtype.fields) == 2
    mc2 = table.MaskedColumn(mc)
    assert_array_equal(mc2, mc)


@pytest.mark.parametrize("dtype", ["i4,f4", "f4,(2,)f8"])
def test_structured_empty_column_init(dtype):
    dtype = np.dtype(dtype)
    c = table.Column(length=5, shape=(2,), dtype=dtype)
    assert c.shape == (5, 2)
    assert c.dtype == dtype


def test_column_value_access():
    """Can a column's underlying data consistently be accessed via `.value`,
    whether it is a `Column`, `MaskedColumn`, `Quantity`, or `Time`?"""
    data = np.array([1, 2, 3])
    tbl = table.QTable(
        {
            "a": table.Column(data),
            "b": table.MaskedColumn(data),
            "c": u.Quantity(data),
            "d": time.Time(data, format="mjd"),
        }
    )
    assert type(tbl["a"].value) == np.ndarray
    assert type(tbl["b"].value) == np.ma.MaskedArray
    assert type(tbl["c"].value) == np.ndarray
    assert type(tbl["d"].value) == np.ndarray


def test_masked_column_serialize_method_propagation():
    mc = table.MaskedColumn([1.0, 2.0, 3.0], mask=[True, False, True])
    assert mc.info.serialize_method["ecsv"] == "null_value"

    mc.info.serialize_method["ecsv"] = "data_mask"
    assert mc.info.serialize_method["ecsv"] == "data_mask"

    mc2 = mc.copy()
    assert mc2.info.serialize_method["ecsv"] == "data_mask"

    mc3 = table.MaskedColumn(mc)
    assert mc3.info.serialize_method["ecsv"] == "data_mask"

    mc4 = mc.view(table.MaskedColumn)
    assert mc4.info.serialize_method["ecsv"] == "data_mask"

    mc5 = mc[1:]
    assert mc5.info.serialize_method["ecsv"] == "data_mask"


@pytest.mark.parametrize("dtype", ["S", "U", "i"])
def test_searchsorted(Column, dtype):
    c = Column([1, 2, 2, 3], dtype=dtype)
    if Column is table.MaskedColumn:
        # Searchsorted seems to ignore the mask
        c[2] = np.ma.masked

    if dtype == "i":
        vs = (2, [2, 1])
    else:
        vs = ("2", ["2", "1"], b"2", [b"2", b"1"])
    for v in vs:
        v = np.array(v, dtype=dtype)
        exp = np.searchsorted(c.data, v, side="right")
        res = c.searchsorted(v, side="right")
        assert np.all(res == exp)
        res = np.searchsorted(c, v, side="right")
        assert np.all(res == exp)
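

# Illustrative sketch, not part of the astropy test suite: Column.searchsorted
# follows the numpy convention checked in test_searchsorted above.  The helper
# name is hypothetical; only public astropy.table API is assumed.
def _sketch_column_searchsorted():
    import numpy as np
    from astropy import table

    c = table.Column([1, 2, 2, 3])
    # side="right" returns the insertion index after any equal elements
    assert c.searchsorted(2, side="right") == 3
    assert np.searchsorted(c, 2, side="right") == 3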


# Licensed under a 3-clause BSD style license - see LICENSE.rst
import sys

import numpy as np
import pytest

from astropy import table
from astropy import units as u
from astropy.table import Row

from .conftest import MaskedTable


def test_masked_row_with_object_col():
    """
    Numpy < 1.8 has a bug in masked array that prevents accessing a row if
    there is a column with object type.
    """
    t = table.Table([[1]], dtype=["O"], masked=True)
    t["col0"].mask = False
    assert t[0]["col0"] == 1
    t["col0"].mask = True
    assert t[0]["col0"] is np.ma.masked


@pytest.mark.usefixtures("table_types")
class TestRow:
    def _setup(self, table_types):
        self._table_type = table_types.Table
        self._column_type = table_types.Column

    @property
    def t(self):
        # pytest wants to run this method once before table_types is run
        # to set Table and Column.  In this case just return None, which would
        # cause any downstream test to fail if this happened in any other context.
        if self._column_type is None:
            return None
        if not hasattr(self, "_t"):
            a = self._column_type(name="a", data=[1, 2, 3], dtype="i8")
            b = self._column_type(name="b", data=[4, 5, 6], dtype="i8")
            self._t = self._table_type([a, b])
        return self._t

    def test_subclass(self, table_types):
        """A constructed Row is an instance of Row"""
        self._setup(table_types)
        c = Row(self.t, 2)
        assert isinstance(c, Row)

    def test_values(self, table_types):
        """Row accurately reflects table values and attributes"""
        self._setup(table_types)
        table = self.t
        row = table[1]
        assert row["a"] == 2
        assert row["b"] == 5
        assert row[0] == 2
        assert row[1] == 5
        assert row.meta is table.meta
        assert row.colnames == table.colnames
        assert row.columns is table.columns
        with pytest.raises(IndexError):
            row[2]
        if sys.byteorder == "little":
            assert str(row.dtype) == "[('a', '<i8'), ('b', '<i8')]"
        else:
            assert str(row.dtype) == "[('a', '>i8'), ('b', '>i8')]"

    def test_ref(self, table_types):
        """Row is a reference into original table data"""
        self._setup(table_types)
        table = self.t
        row = table[1]
        row["a"] = 10
        if table_types.Table is not MaskedTable:
            assert table["a"][1] == 10

    def test_left_equal(self, table_types):
        """Compare a table row to the corresponding structured array row"""
        self._setup(table_types)
        np_t = self.t.as_array()
        if table_types.Table is MaskedTable:
            with pytest.raises(ValueError):
                self.t[0] == np_t[0]
        else:
            for row, np_row in zip(self.t, np_t):
                assert np.all(row == np_row)

    def test_left_not_equal(self, table_types):
        """Compare a table row to the corresponding structured array row"""
        self._setup(table_types)
        np_t = self.t.as_array()
        np_t["a"] = [0, 0, 0]
        if table_types.Table is MaskedTable:
            with pytest.raises(ValueError):
                self.t[0] == np_t[0]
        else:
            for row, np_row in zip(self.t, np_t):
                assert np.all(row != np_row)

    def test_right_equal(self, table_types):
        """Test right equal"""
        self._setup(table_types)
        np_t = self.t.as_array()
        if table_types.Table is MaskedTable:
            with pytest.raises(ValueError):
                self.t[0] == np_t[0]
        else:
            for row, np_row in zip(self.t, np_t):
                assert np.all(np_row == row)

    def test_convert_numpy_array(self, table_types):
        self._setup(table_types)
        d = self.t[1]

        np_data = np.array(d)
        if table_types.Table is not MaskedTable:
            assert np.all(np_data == d.as_void())
        assert np_data is not d.as_void()
        assert d.colnames == list(np_data.dtype.names)

        np_data = np.array(d, copy=False)
        if table_types.Table is not MaskedTable:
            assert np.all(np_data == d.as_void())
        assert np_data is not d.as_void()
        assert d.colnames == list(np_data.dtype.names)

        with pytest.raises(ValueError):
            np_data = np.array(d, dtype=[("c", "i8"), ("d", "i8")])
"i8")]) def test_format_row(self, table_types): """Test formatting row""" self._setup(table_types) table = self.t row = table[0] assert repr(row).splitlines() == [ "<{} {}{}>".format( row.__class__.__name__, "index=0", " masked=True" if table.masked else "", ), " a b ", "int64 int64", "----- -----", " 1 4", ] assert str(row).splitlines() == [" a b ", "--- ---", " 1 4"] assert row._repr_html_().splitlines() == [ "<i>{} {}{}</i>".format( row.__class__.__name__, "index=0", " masked=True" if table.masked else "", ), f'<table id="table{id(table)}">', "<thead><tr><th>a</th><th>b</th></tr></thead>", "<thead><tr><th>int64</th><th>int64</th></tr></thead>", "<tr><td>1</td><td>4</td></tr>", "</table>", ] def test_as_void(self, table_types): """Test the as_void() method""" self._setup(table_types) table = self.t row = table[0] # If masked then with no masks, issue numpy/numpy#483 should come # into play. Make sure as_void() code is working. row_void = row.as_void() if table.masked: assert isinstance(row_void, np.ma.mvoid) else: assert isinstance(row_void, np.void) assert row_void["a"] == 1 assert row_void["b"] == 4 # Confirm row is a view of table but row_void is not. table["a"][0] = -100 assert row["a"] == -100 assert row_void["a"] == 1 # Make sure it works for a table that has masked elements if table.masked: table["a"].mask = True # row_void is not a view, need to re-make assert row_void["a"] == 1 row_void = row.as_void() # but row is a view assert row["a"] is np.ma.masked def test_row_and_as_void_with_objects(self, table_types): """Test the deprecated data property and as_void() method""" t = table_types.Table([[{"a": 1}, {"b": 2}]], names=("a",)) assert t[0][0] == {"a": 1} assert t[0]["a"] == {"a": 1} assert t[0].as_void()[0] == {"a": 1} assert t[0].as_void()["a"] == {"a": 1} def test_bounds_checking(self, table_types): """Row gives index error upon creation for out-of-bounds index""" self._setup(table_types) for ibad in (-5, -4, 3, 4): with pytest.raises(IndexError): self.t[ibad] def test_create_rows_from_list(self, table_types): """https://github.com/astropy/astropy/issues/8976""" orig_tab = table_types.Table([[1, 2, 3], [4, 5, 6]], names=("a", "b")) new_tab = type(orig_tab)( rows=[row for row in orig_tab], names=orig_tab.dtype.names ) assert np.all(orig_tab == new_tab) def test_row_keys_values(self, table_types): self._setup(table_types) row = self.t[0] for row_key, col_key in zip(row.keys(), self.t.columns.keys()): assert row_key == col_key for row_value, col in zip(row.values(), self.t.columns.values()): assert row_value == col[0] def test_row_as_mapping(self, table_types): self._setup(table_types) row = self.t[0] row_dict = dict(row) for key, value in row_dict.items(): assert row[key] == value def f(**kwargs): return kwargs row_splatted = f(**row) for key, value in row_splatted.items(): assert row[key] == value def test_row_as_sequence(self, table_types): self._setup(table_types) row = self.t[0] row_tuple = tuple(row) keys = tuple(row.keys()) for key, value in zip(keys, row_tuple): assert row[key] == value def f(*args): return args row_splatted = f(*row) for key, value in zip(keys, row_splatted): assert row[key] == value def test_row_tuple_column_slice(): """ Test getting and setting a row using a tuple or list of column names """ t = table.QTable( [ [1, 2, 3] * u.m, [10.0, 20.0, 30.0], [100.0, 200.0, 300.0], ["x", "y", "z"], ], names=["a", "b", "c", "d"], ) # Get a row for index=1 r1 = t[1] # Column slice with tuple of col names r1_abc = r1["a", "b", "c"] # Row object for these cols 
    r1_abc_repr = [
        "<Row index=1>",
        "   a       b       c   ",
        "   m                   ",
        "float64 float64 float64",
        "------- ------- -------",
        "    2.0    20.0   200.0",
    ]
    assert repr(r1_abc).splitlines() == r1_abc_repr

    # Column slice with list of col names
    r1_abc = r1[["a", "b", "c"]]
    assert repr(r1_abc).splitlines() == r1_abc_repr

    # Make sure setting on a tuple or slice updates parent table and row
    r1["c"] = 1000
    r1["a", "b"] = 1000 * u.cm, 100.0
    assert r1["a"] == 10 * u.m
    assert r1["b"] == 100
    assert t["a"][1] == 10 * u.m
    assert t["b"][1] == 100.0
    assert t["c"][1] == 1000

    # Same but using a list of column names instead of tuple
    r1[["a", "b"]] = 2000 * u.cm, 200.0
    assert r1["a"] == 20 * u.m
    assert r1["b"] == 200
    assert t["a"][1] == 20 * u.m
    assert t["b"][1] == 200.0

    # Set column slice of column slice
    r1_abc["a", "c"] = -1 * u.m, -10
    assert t["a"][1] == -1 * u.m
    assert t["b"][1] == 200.0
    assert t["c"][1] == -10.0

    # Bad column name
    with pytest.raises(KeyError) as err:
        t[1]["a", "not_there"]
    assert "'not_there'" in str(err.value)

    # Too many values
    with pytest.raises(ValueError) as err:
        t[1]["a", "b"] = 1 * u.m, 2, 3
    assert "right hand side must be a sequence" in str(err.value)

    # Something without a length
    with pytest.raises(ValueError) as err:
        t[1]["a", "b"] = 1
    assert "right hand side must be a sequence" in str(err.value)


def test_row_tuple_column_slice_transaction():
    """
    Test that setting a row that fails part way through does not
    change the table at all.
    """
    t = table.QTable(
        [
            [10.0, 20.0, 30.0],
            [1, 2, 3] * u.m,
        ],
        names=["a", "b"],
    )
    tc = t.copy()

    # First one succeeds but second fails.
    with pytest.raises(ValueError) as err:
        t[1]["a", "b"] = (-1, -1 * u.s)  # Bad unit
    assert "'s' (time) and 'm' (length) are not convertible" in str(err.value)
    assert t[1] == tc[1]


def test_uint_indexing():
    """
    Test that accessing a row with an unsigned integer
    works as with a signed integer.  Similarly tests
    that printing such a row works.

    This is non-trivial: adding a signed and unsigned
    integer in numpy results in a float, which is an
    invalid slice index.

    Regression test for gh-7464.
    """
    t = table.Table([[1.0, 2.0, 3.0]], names="a")
    assert t["a"][1] == 2.0
    assert t["a"][np.int_(1)] == 2.0
    assert t["a"][np.uint(1)] == 2.0
    assert t[np.uint(1)]["a"] == 2.0

    trepr = [
        "<Row index=1>",
        "   a   ",
        "float64",
        "-------",
        "    2.0",
    ]
    assert repr(t[1]).splitlines() == trepr
    assert repr(t[np.int_(1)]).splitlines() == trepr
    assert repr(t[np.uint(1)]).splitlines() == trepr
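

# Illustrative sketch, not part of the astropy test suite: multi-column get/set
# on a Row writes through to the parent table, as test_row_tuple_column_slice
# verifies above.  The helper name is hypothetical.
def _sketch_row_multicolumn_set():
    import astropy.units as u
    from astropy.table import QTable

    t = QTable([[1, 2] * u.m, [10.0, 20.0]], names=["a", "b"])
    row = t[1]
    row["a", "b"] = 300 * u.cm, 200.0  # tuple assignment converts units
    assert t["a"][1] == 3 * u.m
    assert t["b"][1] == 200.0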


# Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import pickle
from io import StringIO

import numpy as np
import pytest

from astropy import coordinates, time
from astropy import units as u
from astropy.coordinates import EarthLocation, SkyCoord
from astropy.coordinates.tests.helper import skycoord_equal
from astropy.coordinates.tests.test_representation import representation_equal
from astropy.table import (
    Column,
    NdarrayMixin,
    QTable,
    Table,
    hstack,
    join,
    serialize,
    table_helpers,
    vstack,
)
from astropy.table.column import BaseColumn
from astropy.table.serialize import represent_mixins_as_columns
from astropy.table.table_helpers import ArrayWrapper
from astropy.utils.data_info import ParentDtypeInfo
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.metadata import MergeConflictWarning

from .conftest import MIXIN_COLS


def test_attributes(mixin_cols):
    """
    Required attributes for a column can be set.
    """
    m = mixin_cols["m"]
    m.info.name = "a"
    assert m.info.name == "a"

    m.info.description = "a"
    assert m.info.description == "a"

    # Cannot set unit for these classes
    if isinstance(
        m,
        (
            u.Quantity,
            coordinates.SkyCoord,
            time.Time,
            time.TimeDelta,
            coordinates.BaseRepresentationOrDifferential,
        ),
    ):
        with pytest.raises(AttributeError):
            m.info.unit = u.m
    else:
        m.info.unit = u.m
        assert m.info.unit is u.m

    m.info.format = "a"
    assert m.info.format == "a"

    m.info.meta = {"a": 1}
    assert m.info.meta == {"a": 1}

    with pytest.raises(AttributeError):
        m.info.bad_attr = 1

    with pytest.raises(AttributeError):
        m.info.bad_attr


def check_mixin_type(table, table_col, in_col):
    # We check for QuantityInfo rather than just isinstance(col, u.Quantity)
    # since we want to treat EarthLocation as a mixin, even though it is
    # a Quantity subclass.
    if (
        isinstance(in_col.info, u.QuantityInfo) and type(table) is not QTable
    ) or isinstance(in_col, Column):
        assert type(table_col) is table.ColumnClass
    else:
        assert type(table_col) is type(in_col)

    # Make sure in_col got copied and creating table did not touch it
    assert in_col.info.name is None


def test_make_table(table_types, mixin_cols):
    """
    Make a table with the columns in mixin_cols, which is an ordered dict of
    three cols: 'a' and 'b' are table_types.Column type, and 'm' is a mixin.
    """
    t = table_types.Table(mixin_cols)
    check_mixin_type(t, t["m"], mixin_cols["m"])

    cols = list(mixin_cols.values())
    t = table_types.Table(cols, names=("i", "a", "b", "m"))
    check_mixin_type(t, t["m"], mixin_cols["m"])

    t = table_types.Table(cols)
    check_mixin_type(t, t["col3"], mixin_cols["m"])


def test_io_ascii_write():
    """
    Test that a table with a mixin column can be written by io.ascii for
    every pure-Python writer.  No validation of the output is done; this
    just confirms that no exceptions are raised.
    """
    from astropy.io.ascii.connect import _get_connectors_table

    t = QTable(MIXIN_COLS)
    for fmt in _get_connectors_table():
        if fmt["Write"] and ".fast_" not in fmt["Format"]:
            out = StringIO()
            t.write(out, format=fmt["Format"])


def test_votable_quantity_write(tmp_path):
    """
    Test that a table with a Quantity mixin column can be round-tripped by
    io.votable.  Note that FITS and HDF5 mixin support are tested (much more
    thoroughly) in their respective subpackage tests
    (io/fits/tests/test_connect.py and io/misc/tests/test_hdf5.py).
""" t = QTable() t["a"] = u.Quantity([1, 2, 4], unit="nm") filename = tmp_path / "table-tmp" t.write(filename, format="votable", overwrite=True) qt = QTable.read(filename, format="votable") assert isinstance(qt["a"], u.Quantity) assert qt["a"].unit == "nm" @pytest.mark.remote_data @pytest.mark.parametrize("table_types", (Table, QTable)) def test_io_time_write_fits_standard(tmp_path, table_types): """ Test that table with Time mixin columns can be written by io.fits. Validation of the output is done. Test that io.fits writes a table containing Time mixin columns that can be partially round-tripped (metadata scale, location). Note that we postpone checking the "local" scale, since that cannot be done with format 'cxcsec', as it requires an epoch. """ t = table_types([[1, 2], ["string", "column"]]) for scale in time.STANDARD_TIME_SCALES: t["a" + scale] = time.Time( [[1, 2], [3, 4]], format="cxcsec", scale=scale, location=EarthLocation(-2446354, 4237210, 4077985, unit="m"), ) t["b" + scale] = time.Time( ["1999-01-01T00:00:00.123456789", "2010-01-01T00:00:00"], scale=scale ) t["c"] = [3.0, 4.0] filename = tmp_path / "table-tmp" # Show that FITS format succeeds with pytest.warns( AstropyUserWarning, match=( 'Time Column "btai" has no specified location, ' "but global Time Position is present" ), ): t.write(filename, format="fits", overwrite=True) with pytest.warns( AstropyUserWarning, match='Time column reference position "TRPOSn" is not specified', ): tm = table_types.read(filename, format="fits", astropy_native=True) for scale in time.STANDARD_TIME_SCALES: for ab in ("a", "b"): name = ab + scale # Assert that the time columns are read as Time assert isinstance(tm[name], time.Time) # Assert that the scales round-trip assert tm[name].scale == t[name].scale # Assert that the format is jd assert tm[name].format == "jd" # Assert that the location round-trips assert tm[name].location == t[name].location # Finally assert that the column data round-trips assert (tm[name] == t[name]).all() for name in ("col0", "col1", "c"): # Assert that the non-time columns are read as Column assert isinstance(tm[name], Column) # Assert that the non-time columns' data round-trips assert (tm[name] == t[name]).all() # Test for conversion of time data to its value, as defined by its format for scale in time.STANDARD_TIME_SCALES: for ab in ("a", "b"): name = ab + scale t[name].info.serialize_method["fits"] = "formatted_value" t.write(filename, format="fits", overwrite=True) tm = table_types.read(filename, format="fits") for scale in time.STANDARD_TIME_SCALES: for ab in ("a", "b"): name = ab + scale assert not isinstance(tm[name], time.Time) assert (tm[name] == t[name].value).all() @pytest.mark.parametrize("table_types", (Table, QTable)) def test_io_time_write_fits_local(tmp_path, table_types): """ Test that table with a Time mixin with scale local can also be written by io.fits. Like ``test_io_time_write_fits_standard`` above, but avoiding ``cxcsec`` format, which requires an epoch and thus cannot be used for a local time scale. 
""" t = table_types([[1, 2], ["string", "column"]]) t["a_local"] = time.Time( [[50001, 50002], [50003, 50004]], format="mjd", scale="local", location=EarthLocation(-2446354, 4237210, 4077985, unit="m"), ) t["b_local"] = time.Time( ["1999-01-01T00:00:00.123456789", "2010-01-01T00:00:00"], scale="local" ) t["c"] = [3.0, 4.0] filename = tmp_path / "table-tmp" # Show that FITS format succeeds with pytest.warns( AstropyUserWarning, match='Time Column "b_local" has no specified location' ): t.write(filename, format="fits", overwrite=True) with pytest.warns( AstropyUserWarning, match='Time column reference position "TRPOSn" is not specified.', ): tm = table_types.read(filename, format="fits", astropy_native=True) for ab in ("a", "b"): name = ab + "_local" # Assert that the time columns are read as Time assert isinstance(tm[name], time.Time) # Assert that the scales round-trip assert tm[name].scale == t[name].scale # Assert that the format is jd assert tm[name].format == "jd" # Assert that the location round-trips assert tm[name].location == t[name].location # Finally assert that the column data round-trips assert (tm[name] == t[name]).all() for name in ("col0", "col1", "c"): # Assert that the non-time columns are read as Column assert isinstance(tm[name], Column) # Assert that the non-time columns' data round-trips assert (tm[name] == t[name]).all() # Test for conversion of time data to its value, as defined by its format. for ab in ("a", "b"): name = ab + "_local" t[name].info.serialize_method["fits"] = "formatted_value" t.write(filename, format="fits", overwrite=True) tm = table_types.read(filename, format="fits") for ab in ("a", "b"): name = ab + "_local" assert not isinstance(tm[name], time.Time) assert (tm[name] == t[name].value).all() def test_votable_mixin_write_fail(mixin_cols): """ Test that table with mixin columns (excluding Quantity) cannot be written by io.votable. """ t = QTable(mixin_cols) # Only do this test if there are unsupported column types (i.e. anything besides # BaseColumn and Quantity class instances). unsupported_cols = t.columns.not_isinstance((BaseColumn, u.Quantity)) if not unsupported_cols: pytest.skip("no unsupported column types") out = StringIO() with pytest.raises(ValueError) as err: t.write(out, format="votable") assert "cannot write table with mixin column(s)" in str(err.value) def test_join(table_types): """ Join tables with mixin cols. Use column "i" as proxy for what the result should be for each mixin. 
""" t1 = table_types.Table() t1["a"] = table_types.Column(["a", "b", "b", "c"]) t1["i"] = table_types.Column([0, 1, 2, 3]) for name, col in MIXIN_COLS.items(): t1[name] = col t2 = table_types.Table(t1) t2["a"] = ["b", "c", "a", "d"] for name, col in MIXIN_COLS.items(): t1[name].info.description = name t2[name].info.description = name + "2" for join_type in ("inner", "left"): t12 = join(t1, t2, keys="a", join_type=join_type) idx1 = t12["i_1"] idx2 = t12["i_2"] for name, col in MIXIN_COLS.items(): name1 = name + "_1" name2 = name + "_2" assert_table_name_col_equal(t12, name1, col[idx1]) assert_table_name_col_equal(t12, name2, col[idx2]) assert t12[name1].info.description == name assert t12[name2].info.description == name + "2" for join_type in ("outer", "right"): with pytest.raises(NotImplementedError) as exc: t12 = join(t1, t2, keys="a", join_type=join_type) assert "join requires masking column" in str(exc.value) with pytest.raises(TypeError) as exc: t12 = join(t1, t2, keys=["a", "skycoord"]) assert "one or more key columns are not sortable" in str(exc.value) # Join does work for a mixin which is a subclass of np.ndarray with pytest.warns( MergeConflictWarning, match="In merged column 'quantity' the 'description' attribute does not match", ): t12 = join(t1, t2, keys=["quantity"]) assert np.all(t12["a_1"] == t1["a"]) def test_hstack(table_types): """ Hstack tables with mixin cols. Use column "i" as proxy for what the result should be for each mixin. """ t1 = table_types.Table() t1["i"] = table_types.Column([0, 1, 2, 3]) for name, col in MIXIN_COLS.items(): t1[name] = col t1[name].info.description = name t1[name].info.meta = {"a": 1} for join_type in ("inner", "outer"): for chop in (True, False): t2 = table_types.Table(t1) if chop: t2 = t2[:-1] if join_type == "outer": with pytest.raises(NotImplementedError) as exc: t12 = hstack([t1, t2], join_type=join_type) assert "hstack requires masking column" in str(exc.value) continue t12 = hstack([t1, t2], join_type=join_type) idx1 = t12["i_1"] idx2 = t12["i_2"] for name, col in MIXIN_COLS.items(): name1 = name + "_1" name2 = name + "_2" assert_table_name_col_equal(t12, name1, col[idx1]) assert_table_name_col_equal(t12, name2, col[idx2]) for attr in ("description", "meta"): assert getattr(t1[name].info, attr) == getattr( t12[name1].info, attr ) assert getattr(t2[name].info, attr) == getattr( t12[name2].info, attr ) def assert_table_name_col_equal(t, name, col): """ Assert all(t[name] == col), with special handling for known mixin cols. 
""" if isinstance(col, coordinates.SkyCoord): assert np.all(t[name].ra == col.ra) assert np.all(t[name].dec == col.dec) elif isinstance(col, coordinates.BaseRepresentationOrDifferential): assert np.all(representation_equal(t[name], col)) elif isinstance(col, u.Quantity): if type(t) is QTable: assert np.all(t[name] == col) elif isinstance(col, table_helpers.ArrayWrapper): assert np.all(t[name].data == col.data) else: assert np.all(t[name] == col) def test_get_items(mixin_cols): """ Test that slicing / indexing table gives right values and col attrs inherit """ attrs = ("name", "unit", "dtype", "format", "description", "meta") m = mixin_cols["m"] m.info.name = "m" m.info.format = "{0}" m.info.description = "d" m.info.meta = {"a": 1} t = QTable([m]) for item in ([1, 3], np.array([0, 2]), slice(1, 3)): t2 = t[item] m2 = m[item] assert_table_name_col_equal(t2, "m", m[item]) for attr in attrs: assert getattr(t2["m"].info, attr) == getattr(m.info, attr) assert getattr(m2.info, attr) == getattr(m.info, attr) def test_info_preserved_pickle_copy_init(mixin_cols): """ Test copy, pickle, and init from class roundtrip preserve info. This tests not only the mixin classes but a regular column as well. """ def pickle_roundtrip(c): return pickle.loads(pickle.dumps(c)) def init_from_class(c): return c.__class__(c) attrs = ("name", "unit", "dtype", "format", "description", "meta") for colname in ("i", "m"): m = mixin_cols[colname] m.info.name = colname m.info.format = "{0}" m.info.description = "d" m.info.meta = {"a": 1} for func in (copy.copy, copy.deepcopy, pickle_roundtrip, init_from_class): m2 = func(m) for attr in attrs: # non-native byteorder not preserved by last 2 func, _except_ for structured dtype if ( attr != "dtype" or getattr(m.info.dtype, "isnative", True) or m.info.dtype.name.startswith("void") or func in (copy.copy, copy.deepcopy) ): original = getattr(m.info, attr) else: # func does not preserve byteorder, check against (native) type. original = m.info.dtype.newbyteorder("=") assert getattr(m2.info, attr) == original def check_share_memory(col1, col2, copy): """Check whether data attributes in col1 and col2 share memory. If copy=True, this should not be the case for any, while if copy=False, all should share memory. """ if isinstance(col1, SkyCoord): # For SkyCoord, .info does not access actual data by default, # but rather attributes like .ra, which are copies. map1 = col1.data.info._represent_as_dict() map2 = col2.data.info._represent_as_dict() else: map1 = col1.info._represent_as_dict() map2 = col2.info._represent_as_dict() # Check array attributes only (in principle, could iterate on, e.g., # differentials in representations, but this is enough for table). shared = [ np.may_share_memory(v1, v2) for (v1, v2) in zip(map1.values(), map2.values()) if isinstance(v1, np.ndarray) and v1.shape ] if copy: assert not any(shared) else: assert all(shared) @pytest.mark.parametrize("copy", [True, False]) def test_add_column(mixin_cols, copy): """ Test that adding a column preserves values and attributes. For copy=True, the data should be independent; for copy=False, the data should be shared, but the instance independent. """ attrs = ("name", "unit", "dtype", "format", "description", "meta") m = mixin_cols["m"] assert m.info.name is None # Make sure adding column in various ways doesn't touch info. 
    t = QTable([m], names=["a"], copy=copy)
    assert m.info.name is None
    check_share_memory(m, t["a"], copy=copy)

    t["new"] = m
    assert m.info.name is None
    check_share_memory(m, t["new"], copy=True)

    m.info.name = "m"
    m.info.format = "{0}"
    m.info.description = "d"
    m.info.meta = {"a": 1}
    t = QTable([m], copy=copy)
    assert t.colnames == ["m"]
    check_share_memory(m, t["m"], copy=copy)

    t = QTable([m], names=["m1"], copy=copy)
    assert m.info.name == "m"
    assert t.colnames == ["m1"]
    check_share_memory(m, t["m1"], copy=copy)

    # Add columns m2, m3, m4 by two different methods and test expected equality
    t["m2"] = m
    check_share_memory(m, t["m2"], copy=True)
    m.info.name = "m3"
    t.add_columns([m], copy=copy)
    check_share_memory(m, t["m3"], copy=copy)
    for name in ("m2", "m3"):
        assert_table_name_col_equal(t, name, m)
        for attr in attrs:
            if attr != "name":
                assert getattr(t["m1"].info, attr) == getattr(t[name].info, attr)

    # Also check that one can set using a scalar.
    s = m[0]
    if type(s) is type(m) and "info" in s.__dict__:
        # We're not going to worry about testing classes for which scalars
        # are a different class than the real array, or where info is not copied.
        t["s"] = m[0]
        assert_table_name_col_equal(t, "s", m[0])
        check_share_memory(m, t["s"], copy=True)
        for attr in attrs:
            if attr != "name":
                assert getattr(t["m1"].info, attr) == getattr(t["s"].info, attr)

    # While we're at it, also check a length-1 table.
    t = QTable([m[1:2]], names=["m"], copy=copy)
    check_share_memory(m, t["m"], copy=copy)
    if type(s) is type(m) and "info" in s.__dict__:
        t["s"] = m[0]
        assert_table_name_col_equal(t, "s", m[0])
        for attr in attrs:
            if attr != "name":
                assert getattr(t["m1"].info, attr) == getattr(t["s"].info, attr)


def test_vstack():
    """
    Vstack tables with mixin cols.
    """
    t1 = QTable(MIXIN_COLS)
    t2 = QTable(MIXIN_COLS)
    with pytest.raises(NotImplementedError):
        vstack([t1, t2])


def test_insert_row(mixin_cols):
    """
    Test inserting a row, which works for Column, Quantity, Time and SkyCoord.
    """
    t = QTable(mixin_cols)
    t0 = t.copy()
    t["m"].info.description = "d"
    idxs = [0, -1, 1, 2, 3]
    if isinstance(
        t["m"], (u.Quantity, Column, time.Time, time.TimeDelta, coordinates.SkyCoord)
    ):
        t.insert_row(1, t[-1])

        for name in t.colnames:
            col = t[name]
            if isinstance(col, coordinates.SkyCoord):
                assert skycoord_equal(col, t0[name][idxs])
            else:
                assert np.all(col == t0[name][idxs])

        assert t["m"].info.description == "d"
    else:
        with pytest.raises(ValueError) as exc:
            t.insert_row(1, t[-1])
        assert "Unable to insert row" in str(exc.value)


def test_insert_row_bad_unit():
    """
    Insert a row into a QTable with the wrong unit.
    """
    t = QTable([[1] * u.m])
    with pytest.raises(ValueError) as exc:
        t.insert_row(0, (2 * u.m / u.s,))
    assert "'m / s' (speed/velocity) and 'm' (length) are not convertible" in str(
        exc.value
    )


def test_convert_np_array(mixin_cols):
    """
    Test that converting to numpy array creates an object dtype and that
    each instance in the array has the expected type.
    """
    t = QTable(mixin_cols)
    ta = t.as_array()
    m = mixin_cols["m"]
    dtype_kind = m.dtype.kind if hasattr(m, "dtype") else "O"
    assert ta["m"].dtype.kind == dtype_kind


def test_assignment_and_copy():
    """
    Test that assignment of an int, slice, and fancy index works.
    Along the way test that copying table works.
""" for name in ("quantity", "arraywrap"): m = MIXIN_COLS[name] t0 = QTable([m], names=["m"]) for i0, i1 in ( (1, 2), (slice(0, 2), slice(1, 3)), (np.array([1, 2]), np.array([2, 3])), ): t = t0.copy() t["m"][i0] = m[i1] if name == "arraywrap": assert np.all(t["m"].data[i0] == m.data[i1]) assert np.all(t0["m"].data[i0] == m.data[i0]) assert np.all(t0["m"].data[i0] != t["m"].data[i0]) else: assert np.all(t["m"][i0] == m[i1]) assert np.all(t0["m"][i0] == m[i0]) assert np.all(t0["m"][i0] != t["m"][i0]) def test_conversion_qtable_table(): """ Test that a table round trips from QTable => Table => QTable """ qt = QTable(MIXIN_COLS) names = qt.colnames for name in names: qt[name].info.description = name t = Table(qt) for name in names: assert t[name].info.description == name if name == "quantity": assert np.all(t["quantity"] == qt["quantity"].value) assert np.all(t["quantity"].unit is qt["quantity"].unit) assert isinstance(t["quantity"], t.ColumnClass) else: assert_table_name_col_equal(t, name, qt[name]) qt2 = QTable(qt) for name in names: assert qt2[name].info.description == name assert_table_name_col_equal(qt2, name, qt[name]) def test_setitem_as_column_name(): """ Test for mixin-related regression described in #3321. """ t = Table() t["a"] = ["x", "y"] t["b"] = "b" # Previously was failing with KeyError assert np.all(t["a"] == ["x", "y"]) assert np.all(t["b"] == ["b", "b"]) def test_quantity_representation(): """ Test that table representation of quantities does not have unit """ t = QTable([[1, 2] * u.m]) assert t.pformat() == [ "col0", " m ", "----", " 1.0", " 2.0", ] def test_representation_representation(): """ Test that Representations are represented correctly. """ # With no unit we get "None" in the unit row c = coordinates.CartesianRepresentation([0], [1], [0], unit=u.one) t = Table([c]) assert t.pformat() == [ " col0 ", "------------", "(0., 1., 0.)", ] c = coordinates.CartesianRepresentation([0], [1], [0], unit="m") t = Table([c]) assert t.pformat() == [ " col0 ", " m ", "------------", "(0., 1., 0.)", ] c = coordinates.SphericalRepresentation([10] * u.deg, [20] * u.deg, [1] * u.pc) t = Table([c]) assert t.pformat() == [ " col0 ", " deg, deg, pc ", "--------------", "(10., 20., 1.)", ] c = coordinates.UnitSphericalRepresentation([10] * u.deg, [20] * u.deg) t = Table([c]) assert t.pformat() == [ " col0 ", " deg ", "----------", "(10., 20.)", ] c = coordinates.SphericalCosLatDifferential( [10] * u.mas / u.yr, [2] * u.mas / u.yr, [10] * u.km / u.s ) t = Table([c]) assert t.pformat() == [ " col0 ", "mas / yr, mas / yr, km / s", "--------------------------", " (10., 2., 10.)", ] def test_skycoord_representation(): """ Test that skycoord representation works, both in the way that the values are output and in changing the frame representation. 
""" # With no unit we get "None" in the unit row c = coordinates.SkyCoord([0], [1], [0], representation_type="cartesian") t = Table([c]) assert t.pformat() == [ " col0 ", "None,None,None", "--------------", " 0.0,1.0,0.0", ] # Test that info works with a dynamically changed representation c = coordinates.SkyCoord([0], [1], [0], unit="m", representation_type="cartesian") t = Table([c]) assert t.pformat() == [ " col0 ", " m,m,m ", "-----------", "0.0,1.0,0.0", ] t["col0"].representation_type = "unitspherical" assert t.pformat() == [ " col0 ", "deg,deg ", "--------", "90.0,0.0", ] t["col0"].representation_type = "cylindrical" assert t.pformat() == [ " col0 ", " m,deg,m ", "------------", "1.0,90.0,0.0", ] @pytest.mark.parametrize("as_ndarray_mixin", [True, False]) def test_ndarray_mixin(as_ndarray_mixin): """ Test directly adding various forms of structured ndarray columns to a table. Adding as NdarrayMixin is expected to be somewhat unusual after #12644 (which provides full support for structured array Column's). This test shows that the end behavior is the same in both cases. """ a = np.array([(1, "a"), (2, "b"), (3, "c"), (4, "d")], dtype="<i4," + "|U1") b = np.array( [(10, "aa"), (20, "bb"), (30, "cc"), (40, "dd")], dtype=[("x", "i4"), ("y", "U2")], ) c = np.rec.fromrecords( [(100.0, "raa"), (200.0, "rbb"), (300.0, "rcc"), (400.0, "rdd")], names=["rx", "ry"], ) d = np.arange(8, dtype="i8").reshape(4, 2) if as_ndarray_mixin: a = a.view(NdarrayMixin) b = b.view(NdarrayMixin) c = c.view(NdarrayMixin) d = d.view(NdarrayMixin) class_exp = NdarrayMixin else: class_exp = Column # Add one during initialization and the next as a new column. t = Table([a], names=["a"]) t["b"] = b t["c"] = c t["d"] = d assert isinstance(t["a"], class_exp) assert t["a"][1][1] == a[1][1] assert t["a"][2][0] == a[2][0] assert t[1]["a"][1] == a[1][1] assert t[2]["a"][0] == a[2][0] assert isinstance(t["b"], class_exp) assert t["b"][1]["x"] == b[1]["x"] assert t["b"][1]["y"] == b[1]["y"] assert t[1]["b"]["x"] == b[1]["x"] assert t[1]["b"]["y"] == b[1]["y"] assert isinstance(t["c"], class_exp) assert t["c"][1]["rx"] == c[1]["rx"] assert t["c"][1]["ry"] == c[1]["ry"] assert t[1]["c"]["rx"] == c[1]["rx"] assert t[1]["c"]["ry"] == c[1]["ry"] assert isinstance(t["d"], class_exp) assert t["d"][1][0] == d[1][0] assert t["d"][1][1] == d[1][1] assert t[1]["d"][0] == d[1][0] assert t[1]["d"][1] == d[1][1] assert t.pformat(show_dtype=True) == [ " a [f0, f1] b [x, y] c [rx, ry] d ", "(int32, str1) (int32, str2) (float64, str3) int64[2]", "------------- ------------- --------------- --------", " (1, 'a') (10, 'aa') (100., 'raa') 0 .. 1", " (2, 'b') (20, 'bb') (200., 'rbb') 2 .. 3", " (3, 'c') (30, 'cc') (300., 'rcc') 4 .. 5", " (4, 'd') (40, 'dd') (400., 'rdd') 6 .. 7", ] def test_possible_string_format_functions(): """ The QuantityInfo info class for Quantity implements a possible_string_format_functions() method that overrides the standard pprint._possible_string_format_functions() function. Test this. """ t = QTable([[1, 2] * u.m]) t["col0"].info.format = "%.3f" assert t.pformat() == [ " col0", " m ", "-----", "1.000", "2.000", ] t["col0"].info.format = "hi {:.3f}" assert t.pformat() == [ " col0 ", " m ", "--------", "hi 1.000", "hi 2.000", ] t["col0"].info.format = ".4f" assert t.pformat() == [ " col0 ", " m ", "------", "1.0000", "2.0000", ] def test_rename_mixin_columns(mixin_cols): """ Rename a mixin column. 
""" t = QTable(mixin_cols) tc = t.copy() t.rename_column("m", "mm") assert t.colnames == ["i", "a", "b", "mm"] if isinstance(t["mm"], table_helpers.ArrayWrapper): assert np.all(t["mm"].data == tc["m"].data) elif isinstance(t["mm"], coordinates.SkyCoord): assert np.all(t["mm"].ra == tc["m"].ra) assert np.all(t["mm"].dec == tc["m"].dec) elif isinstance(t["mm"], coordinates.BaseRepresentationOrDifferential): assert np.all(representation_equal(t["mm"], tc["m"])) else: assert np.all(t["mm"] == tc["m"]) def test_represent_mixins_as_columns_unit_fix(): """ If the unit is invalid for a column that gets serialized this would cause an exception. Fixed in #7481. """ t = Table({"a": [1, 2]}, masked=True) t["a"].unit = "not a valid unit" t["a"].mask[1] = True serialize.represent_mixins_as_columns(t) def test_primary_data_column_gets_description(): """ If the mixin defines a primary data column, that should get the description, format, etc., so no __info__ should be needed. """ t = QTable({"a": [1, 2] * u.m}) t["a"].info.description = "parrot" t["a"].info.format = "7.2f" tser = serialize.represent_mixins_as_columns(t) assert "__info__" not in tser.meta["__serialized_columns__"]["a"] assert tser["a"].format == "7.2f" assert tser["a"].description == "parrot" def test_skycoord_with_velocity(): # Regression test for gh-6447 sc = SkyCoord([1], [2], unit="deg", galcen_v_sun=None) t = Table([sc]) s = StringIO() t.write(s, format="ascii.ecsv", overwrite=True) s.seek(0) t2 = Table.read(s.read(), format="ascii.ecsv") assert skycoord_equal(t2["col0"], sc) @pytest.mark.parametrize("copy", [True, False]) @pytest.mark.parametrize("table_cls", [Table, QTable]) def test_ensure_input_info_is_unchanged(table_cls, copy): """If a mixin input to a table has no info, it should stay that way. This since having 'info' slows down slicing, etc. See gh-11066. """ q = [1, 2] * u.m assert "info" not in q.__dict__ t = table_cls([q], names=["q"], copy=copy) assert "info" not in q.__dict__ t = table_cls([q], copy=copy) assert "info" not in q.__dict__ t = table_cls({"q": q}, copy=copy) assert "info" not in q.__dict__ t["q2"] = q assert "info" not in q.__dict__ sc = SkyCoord([1, 2], [2, 3], unit="deg") t["sc"] = sc assert "info" not in sc.__dict__ def test_bad_info_class(): """Make a mixin column class that does not trigger the machinery to generate a pure column representation""" class MyArrayWrapper(ArrayWrapper): info = ParentDtypeInfo() t = Table() t["tm"] = MyArrayWrapper([0, 1, 2]) out = StringIO() match = ( r"failed to represent column 'tm' \(MyArrayWrapper\) as one or more Column" r" subclasses" ) with pytest.raises(TypeError, match=match): represent_mixins_as_columns(t)


# Licensed under a 3-clause BSD style license - see LICENSE.rst
from io import StringIO

import numpy as np
import pytest

from astropy import table
from astropy import units as u
from astropy.io import ascii
from astropy.table import QTable, Table
from astropy.table.table_helpers import simple_table
from astropy.utils import console

BIG_WIDE_ARR = np.arange(2000, dtype=np.float64).reshape(100, 20)
SMALL_ARR = np.arange(18, dtype=np.int64).reshape(6, 3)


@pytest.mark.usefixtures("table_type")
class TestMultiD:
    def test_multidim(self, table_type):
        """Test printing with multidimensional column"""
        arr = [
            np.array([[1, 2], [10, 20]], dtype=np.int64),
            np.array([[3, 4], [30, 40]], dtype=np.int64),
            np.array([[5, 6], [50, 60]], dtype=np.int64),
        ]
        t = table_type(arr)
        lines = t.pformat(show_dtype=True)
        assert lines == [
            "  col0     col1     col2  ",
            "int64[2] int64[2] int64[2]",
            "-------- -------- --------",
            "  1 .. 2   3 .. 4   5 .. 6",
            "10 .. 20 30 .. 40 50 .. 60",
        ]

        lines = t.pformat(html=True, show_dtype=True)
        assert lines == [
            f'<table id="table{id(t)}">',
            "<thead><tr><th>col0</th><th>col1</th><th>col2</th></tr></thead>",
            "<thead><tr><th>int64[2]</th><th>int64[2]</th><th>int64[2]</th></tr></thead>",
            "<tr><td>1 .. 2</td><td>3 .. 4</td><td>5 .. 6</td></tr>",
            "<tr><td>10 .. 20</td><td>30 .. 40</td><td>50 .. 60</td></tr>",
            "</table>",
        ]
        nbclass = table.conf.default_notebook_table_class
        masked = "masked=True " if t.masked else ""
        assert t._repr_html_().splitlines() == [
            f"<div><i>{table_type.__name__} {masked}length=2</i>",
            f'<table id="table{id(t)}" class="{nbclass}">',
            "<thead><tr><th>col0</th><th>col1</th><th>col2</th></tr></thead>",
            "<thead><tr><th>int64[2]</th><th>int64[2]</th><th>int64[2]</th></tr></thead>",
            "<tr><td>1 .. 2</td><td>3 .. 4</td><td>5 .. 6</td></tr>",
            "<tr><td>10 .. 20</td><td>30 .. 40</td><td>50 .. 60</td></tr>",
            "</table></div>",
        ]

        t = table_type([arr])
        lines = t.pformat(show_dtype=True)
        assert lines == [
            "   col0   ",
            "int64[2,2]",
            "----------",
            "   1 .. 20",
            "   3 .. 40",
            "   5 .. 60",
        ]

    def test_fake_multidim(self, table_type):
        """Test printing with 'fake' multidimensional column"""
        arr = [
            np.array([[(1,)], [(10,)]], dtype=np.int64),
            np.array([[(3,)], [(30,)]], dtype=np.int64),
            np.array([[(5,)], [(50,)]], dtype=np.int64),
        ]
        t = table_type(arr)
        lines = t.pformat(show_dtype=True)
        assert lines == [
            "   col0       col1       col2   ",
            "int64[1,1] int64[1,1] int64[1,1]",
            "---------- ---------- ----------",
            "         1          3          5",
            "        10         30         50",
        ]

        lines = t.pformat(html=True, show_dtype=True)
        assert lines == [
            f'<table id="table{id(t)}">',
            "<thead><tr><th>col0</th><th>col1</th><th>col2</th></tr></thead>",
            "<thead><tr><th>int64[1,1]</th><th>int64[1,1]</th><th>int64[1,1]</th></tr></thead>",
            "<tr><td>1</td><td>3</td><td>5</td></tr>",
            "<tr><td>10</td><td>30</td><td>50</td></tr>",
            "</table>",
        ]
        nbclass = table.conf.default_notebook_table_class
        masked = "masked=True " if t.masked else ""
        assert t._repr_html_().splitlines() == [
            f"<div><i>{table_type.__name__} {masked}length=2</i>",
            f'<table id="table{id(t)}" class="{nbclass}">',
            "<thead><tr><th>col0</th><th>col1</th><th>col2</th></tr></thead>",
            "<thead><tr><th>int64[1,1]</th><th>int64[1,1]</th><th>int64[1,1]</th></tr></thead>",
            "<tr><td>1</td><td>3</td><td>5</td></tr>",
            "<tr><td>10</td><td>30</td><td>50</td></tr>",
            "</table></div>",
        ]

        t = table_type([arr])
        lines = t.pformat(show_dtype=True)
        assert lines == [
            "    col0    ",
            "int64[2,1,1]",
            "------------",
            "     1 .. 10",
            "     3 .. 30",
            "     5 .. 50",
        ]
50", ] def test_html_escaping(): t = table.Table([('<script>alert("gotcha");</script>', 2, 3)]) nbclass = table.conf.default_notebook_table_class assert t._repr_html_().splitlines() == [ "<div><i>Table length=3</i>", f'<table id="table{id(t)}" class="{nbclass}">', "<thead><tr><th>col0</th></tr></thead>", "<thead><tr><th>str33</th></tr></thead>", "<tr><td>&lt;script&gt;alert(&quot;gotcha&quot;);&lt;/script&gt;</td></tr>", "<tr><td>2</td></tr>", "<tr><td>3</td></tr>", "</table></div>", ] @pytest.mark.usefixtures("table_type") class TestPprint: def _setup(self, table_type): self.tb = table_type(BIG_WIDE_ARR) self.tb["col0"].format = "e" self.tb["col1"].format = ".6f" self.tb["col0"].unit = "km**2" self.tb["col19"].unit = "kg s m**-2" self.ts = table_type(SMALL_ARR) def test_empty_table(self, table_type): t = table_type() lines = t.pformat() assert lines == ["<No columns>"] c = repr(t) masked = "masked=True " if t.masked else "" assert c.splitlines() == [ f"<{table_type.__name__} {masked}length=0>", "<No columns>", ] def test_format0(self, table_type): """Try getting screen size but fail to defaults because testing doesn't have access to screen (fcntl.ioctl fails). """ self._setup(table_type) arr = np.arange(4000, dtype=np.float64).reshape(100, 40) lines = table_type(arr).pformat() nlines, width = console.terminal_size() assert len(lines) == nlines for line in lines[:-1]: # skip last "Length = .. rows" line assert width - 10 < len(line) <= width def test_format1(self, table_type): """Basic test of formatting, unit header row included""" self._setup(table_type) lines = self.tb.pformat(max_lines=8, max_width=40) assert lines == [ " col0 col1 ... col19 ", " km2 ... kg s / m2", "------------ ----------- ... ---------", "0.000000e+00 1.000000 ... 19.0", " ... ... ... ...", "1.960000e+03 1961.000000 ... 1979.0", "1.980000e+03 1981.000000 ... 1999.0", "Length = 100 rows", ] def test_format2(self, table_type): """Basic test of formatting, unit header row excluded""" self._setup(table_type) lines = self.tb.pformat(max_lines=8, max_width=40, show_unit=False) assert lines == [ " col0 col1 ... col19 ", "------------ ----------- ... ------", "0.000000e+00 1.000000 ... 19.0", "2.000000e+01 21.000000 ... 39.0", " ... ... ... ...", "1.960000e+03 1961.000000 ... 1979.0", "1.980000e+03 1981.000000 ... 1999.0", "Length = 100 rows", ] def test_format3(self, table_type): """Include the unit header row""" self._setup(table_type) lines = self.tb.pformat(max_lines=8, max_width=40, show_unit=True) assert lines == [ " col0 col1 ... col19 ", " km2 ... kg s / m2", "------------ ----------- ... ---------", "0.000000e+00 1.000000 ... 19.0", " ... ... ... ...", "1.960000e+03 1961.000000 ... 1979.0", "1.980000e+03 1981.000000 ... 1999.0", "Length = 100 rows", ] def test_format4(self, table_type): """Do not include the name header row""" self._setup(table_type) lines = self.tb.pformat(max_lines=8, max_width=40, show_name=False) assert lines == [ " km2 ... kg s / m2", "------------ ----------- ... ---------", "0.000000e+00 1.000000 ... 19.0", "2.000000e+01 21.000000 ... 39.0", " ... ... ... ...", "1.960000e+03 1961.000000 ... 1979.0", "1.980000e+03 1981.000000 ... 
1999.0", "Length = 100 rows", ] def test_noclip(self, table_type): """Basic table print""" self._setup(table_type) lines = self.ts.pformat(max_lines=-1, max_width=-1) assert lines == [ "col0 col1 col2", "---- ---- ----", " 0 1 2", " 3 4 5", " 6 7 8", " 9 10 11", " 12 13 14", " 15 16 17", ] def test_clip1(self, table_type): """max lines below hard limit of 8""" self._setup(table_type) lines = self.ts.pformat(max_lines=3, max_width=-1) assert lines == [ "col0 col1 col2", "---- ---- ----", " 0 1 2", " 3 4 5", " 6 7 8", " 9 10 11", " 12 13 14", " 15 16 17", ] def test_clip2(self, table_type): """max lines below hard limit of 8 and output longer than 8""" self._setup(table_type) lines = self.ts.pformat( max_lines=3, max_width=-1, show_unit=True, show_dtype=True ) assert lines == [ " col0 col1 col2", " ", "int64 int64 int64", "----- ----- -----", " 0 1 2", " ... ... ...", " 15 16 17", "Length = 6 rows", ] def test_clip3(self, table_type): """Max lines below hard limit of 8 and max width below hard limit of 10 """ self._setup(table_type) lines = self.ts.pformat(max_lines=3, max_width=1, show_unit=True) assert lines == [ "col0 ...", " ...", "---- ...", " 0 ...", " ... ...", " 12 ...", " 15 ...", "Length = 6 rows", ] def test_clip4(self, table_type): """Test a range of max_lines""" self._setup(table_type) for max_lines in (0, 1, 4, 5, 6, 7, 8, 100, 101, 102, 103, 104, 130): lines = self.tb.pformat(max_lines=max_lines, show_unit=False) assert len(lines) == max(8, min(102, max_lines)) def test_pformat_all(self, table_type): """Test that all rows are printed by default""" self._setup(table_type) lines = self.tb.pformat_all() # +3 accounts for the three header lines in this table assert len(lines) == BIG_WIDE_ARR.shape[0] + 3 @pytest.fixture def test_pprint_all(self, table_type, capsys): """Test that all rows are printed by default""" self._setup(table_type) self.tb.pprint_all() (out, err) = capsys.readouterr() # +3 accounts for the three header lines in this table assert len(out) == BIG_WIDE_ARR.shape[0] + 3 @pytest.mark.usefixtures("table_type") class TestFormat: def test_column_format(self, table_type): t = table_type([[1, 2], [3, 4]], names=("a", "b")) # default (format=None) assert str(t["a"]) == " a \n---\n 1\n 2" # just a plain format string t["a"].format = "5.2f" assert str(t["a"]) == " a \n-----\n 1.00\n 2.00" # Old-style that is almost new-style t["a"].format = "{ %4.2f }" assert str(t["a"]) == " a \n--------\n{ 1.00 }\n{ 2.00 }" # New-style that is almost old-style t["a"].format = "%{0:}" assert str(t["a"]) == " a \n---\n %1\n %2" # New-style with extra spaces t["a"].format = " {0:05d} " assert str(t["a"]) == " a \n-------\n 00001 \n 00002 " # New-style has precedence t["a"].format = "%4.2f {0:}" assert str(t["a"]) == " a \n-------\n%4.2f 1\n%4.2f 2" # Invalid format spec with pytest.raises(ValueError): t["a"].format = "fail" assert t["a"].format == "%4.2f {0:}" # format did not change def test_column_format_with_threshold(self, table_type): from astropy import conf with conf.set_temp("max_lines", 8): t = table_type([np.arange(20)], names=["a"]) t["a"].format = "%{0:}" assert str(t["a"]).splitlines() == [ " a ", "---", " %0", " %1", "...", "%18", "%19", "Length = 20 rows", ] t["a"].format = "{ %4.2f }" assert str(t["a"]).splitlines() == [ " a ", "---------", " { 0.00 }", " { 1.00 }", " ...", "{ 18.00 }", "{ 19.00 }", "Length = 20 rows", ] def test_column_format_func(self, table_type): # run most of functions twice # 1) astropy.table.pprint._format_funcs gets populated # 2) 
astropy.table.pprint._format_funcs gets used t = table_type([[1.0, 2.0], [3, 4]], names=("a", "b")) # mathematical function t["a"].format = lambda x: str(x * 3.0) assert str(t["a"]) == " a \n---\n3.0\n6.0" assert str(t["a"]) == " a \n---\n3.0\n6.0" def test_column_format_callable(self, table_type): # run most of functions twice # 1) astropy.table.pprint._format_funcs gets populated # 2) astropy.table.pprint._format_funcs gets used t = table_type([[1.0, 2.0], [3, 4]], names=("a", "b")) # mathematical function class format: def __call__(self, x): return str(x * 3.0) t["a"].format = format() assert str(t["a"]) == " a \n---\n3.0\n6.0" assert str(t["a"]) == " a \n---\n3.0\n6.0" def test_column_format_func_wrong_number_args(self, table_type): t = table_type([[1.0, 2.0], [3, 4]], names=("a", "b")) # function that expects wrong number of arguments def func(a, b): pass with pytest.raises(ValueError): t["a"].format = func def test_column_format_func_multiD(self, table_type): arr = [np.array([[1, 2], [10, 20]], dtype="i8")] t = table_type(arr, names=["a"]) # mathematical function t["a"].format = lambda x: str(x * 3.0) outstr = [ " a ", "------------", " 3.0 .. 6.0", "30.0 .. 60.0", ] assert str(t["a"]).splitlines() == outstr def test_column_format_func_not_str(self, table_type): t = table_type([[1.0, 2.0], [3, 4]], names=("a", "b")) # mathematical function with pytest.raises(ValueError): t["a"].format = lambda x: x * 3 def test_column_alignment(self, table_type): t = table_type( [[1], [2], [3], [4]], names=("long title a", "long title b", "long title c", "long title d"), ) t["long title a"].format = "<" t["long title b"].format = "^" t["long title c"].format = ">" t["long title d"].format = "0=" assert str(t["long title a"]) == "long title a\n------------\n1 " assert str(t["long title b"]) == "long title b\n------------\n 2 " assert str(t["long title c"]) == "long title c\n------------\n 3" assert str(t["long title d"]) == "long title d\n------------\n000000000004" class TestFormatWithMaskedElements: def test_column_format(self): t = Table([[1, 2, 3], [3, 4, 5]], names=("a", "b"), masked=True) t["a"].mask = [True, False, True] # default (format=None) assert str(t["a"]) == " a \n---\n --\n 2\n --" # just a plain format string t["a"].format = "5.2f" assert str(t["a"]) == " a \n-----\n --\n 2.00\n --" # Old-style that is almost new-style t["a"].format = "{ %4.2f }" assert str(t["a"]) == " a \n--------\n --\n{ 2.00 }\n --" # New-style that is almost old-style t["a"].format = "%{0:}" assert str(t["a"]) == " a \n---\n --\n %2\n --" # New-style with extra spaces t["a"].format = " {0:05d} " assert str(t["a"]) == " a \n-------\n --\n 00002 \n --" # New-style has precedence t["a"].format = "%4.2f {0:}" assert str(t["a"]) == " a \n-------\n --\n%4.2f 2\n --" def test_column_format_with_threshold_masked_table(self): from astropy import conf with conf.set_temp("max_lines", 8): t = Table([np.arange(20)], names=["a"], masked=True) t["a"].format = "%{0:}" t["a"].mask[0] = True t["a"].mask[-1] = True assert str(t["a"]).splitlines() == [ " a ", "---", " --", " %1", "...", "%18", " --", "Length = 20 rows", ] t["a"].format = "{ %4.2f }" assert str(t["a"]).splitlines() == [ " a ", "---------", " --", " { 1.00 }", " ...", "{ 18.00 }", " --", "Length = 20 rows", ] def test_column_format_func(self): # run most of functions twice # 1) astropy.table.pprint._format_funcs gets populated # 2) astropy.table.pprint._format_funcs gets used t = Table([[1.0, 2.0, 3.0], [3, 4, 5]], names=("a", "b"), masked=True) t["a"].mask = [True, 
False, True] # mathematical function t["a"].format = lambda x: str(x * 3.0) assert str(t["a"]) == " a \n---\n --\n6.0\n --" assert str(t["a"]) == " a \n---\n --\n6.0\n --" def test_column_format_func_with_special_masked(self): # run most of functions twice # 1) astropy.table.pprint._format_funcs gets populated # 2) astropy.table.pprint._format_funcs gets used t = Table([[1.0, 2.0, 3.0], [3, 4, 5]], names=("a", "b"), masked=True) t["a"].mask = [True, False, True] # mathematical function def format_func(x): if x is np.ma.masked: return "!!" else: return str(x * 3.0) t["a"].format = format_func assert str(t["a"]) == " a \n---\n !!\n6.0\n !!" assert str(t["a"]) == " a \n---\n !!\n6.0\n !!" def test_column_format_callable(self): # run most of functions twice # 1) astropy.table.pprint._format_funcs gets populated # 2) astropy.table.pprint._format_funcs gets used t = Table([[1.0, 2.0, 3.0], [3, 4, 5]], names=("a", "b"), masked=True) t["a"].mask = [True, False, True] # mathematical function class format: def __call__(self, x): return str(x * 3.0) t["a"].format = format() assert str(t["a"]) == " a \n---\n --\n6.0\n --" assert str(t["a"]) == " a \n---\n --\n6.0\n --" def test_column_format_func_wrong_number_args(self): t = Table([[1.0, 2.0], [3, 4]], names=("a", "b"), masked=True) t["a"].mask = [True, False] # function that expects wrong number of arguments def func(a, b): pass with pytest.raises(ValueError): t["a"].format = func # but if all are masked, it never gets called t["a"].mask = [True, True] assert str(t["a"]) == " a \n---\n --\n --" def test_column_format_func_multiD(self): arr = [np.array([[1, 2], [10, 20]], dtype="i8")] t = Table(arr, names=["a"], masked=True) t["a"].mask[0, 1] = True t["a"].mask[1, 1] = True # mathematical function t["a"].format = lambda x: str(x * 3.0) outstr = [ " a ", "----------", " 3.0 .. --", "30.0 .. --", ] assert str(t["a"]).splitlines() == outstr assert str(t["a"]).splitlines() == outstr def test_pprint_npfloat32(): """ Test for #148, that np.float32 cannot by itself be formatted as float, but has to be converted to a python float. """ dat = np.array([1.0, 2.0], dtype=np.float32) t = Table([dat], names=["a"]) t["a"].format = "5.2f" assert str(t["a"]) == " a \n-----\n 1.00\n 2.00" def test_pprint_py3_bytes(): """ Test for #1346 and #4944. Make sure a bytestring (dtype=S<N>) in Python 3 is printed correctly (without the "b" prefix like b'string'). 
""" val = bytes("val", encoding="utf-8") blah = "bläh".encode() dat = np.array([val, blah], dtype=[("col", "S10")]) t = table.Table(dat) assert t["col"].pformat() == ["col ", "----", " val", "bläh"] def test_pprint_structured(): su = table.Column( [ (1, (1.5, [1.6, 1.7])), (2, (2.5, [2.6, 2.7])), ], name="su", dtype=[ ("i", np.int64), ("f", [("p0", np.float64), ("p1", np.float64, (2,))]), ], ) assert su.pformat() == [ " su [i, f[p0, p1]] ", "----------------------", "(1, (1.5, [1.6, 1.7]))", "(2, (2.5, [2.6, 2.7]))", ] t = table.Table([su]) assert t.pformat() == su.pformat() assert repr(t).splitlines() == [ "<Table length=2>", " su [i, f[p0, p1]] ", "(int64, (float64, float64[2]))", "------------------------------", " (1, (1.5, [1.6, 1.7]))", " (2, (2.5, [2.6, 2.7]))", ] def test_pprint_structured_with_format(): dtype = np.dtype([("par", "f8"), ("min", "f8"), ("id", "i4"), ("name", "U4")]) c = table.Column( [ (1.2345678, -20, 3, "bar"), (12.345678, 4.5678, 33, "foo"), ], dtype=dtype, ) t = table.Table() t["a"] = [1, 2] t["c"] = c t["c"].info.format = "{par:6.2f} {min:5.1f} {id:03d} {name:4s}" exp = [ " a c [par, min, id, name]", "--- ----------------------", " 1 1.23 -20.0 003 bar ", " 2 12.35 4.6 033 foo ", ] assert t.pformat_all() == exp def test_pprint_nameless_col(): """Regression test for #2213, making sure a nameless column can be printed using None as the name. """ col = table.Column([1.0, 2.0]) assert str(col).startswith("None") def test_html(): """Test HTML printing""" dat = np.array([1.0, 2.0], dtype=np.float32) t = Table([dat], names=["a"]) lines = t.pformat(html=True) assert lines == [ f'<table id="table{id(t)}">', "<thead><tr><th>a</th></tr></thead>", "<tr><td>1.0</td></tr>", "<tr><td>2.0</td></tr>", "</table>", ] lines = t.pformat(html=True, tableclass="table-striped") assert lines == [ f'<table id="table{id(t)}" class="table-striped">', "<thead><tr><th>a</th></tr></thead>", "<tr><td>1.0</td></tr>", "<tr><td>2.0</td></tr>", "</table>", ] lines = t.pformat(html=True, tableclass=["table", "table-striped"]) assert lines == [ f'<table id="table{id(t)}" class="table table-striped">', "<thead><tr><th>a</th></tr></thead>", "<tr><td>1.0</td></tr>", "<tr><td>2.0</td></tr>", "</table>", ] def test_align(): t = simple_table(2, kinds="iS") assert t.pformat() == [ " a b ", "--- ---", " 1 b", " 2 c", ] # Use column format attribute t["a"].format = "<" assert t.pformat() == [ " a b ", "--- ---", "1 b", "2 c", ] # Now override column format attribute with various combinations of align tpf = [" a b ", "--- ---", " 1 b ", " 2 c "] for align in ("^", ["^", "^"], ("^", "^")): assert tpf == t.pformat(align=align) assert t.pformat(align="<") == [ " a b ", "--- ---", "1 b ", "2 c ", ] assert t.pformat(align="0=") == [ " a b ", "--- ---", "001 00b", "002 00c", ] assert t.pformat(align=["<", "^"]) == [ " a b ", "--- ---", "1 b ", "2 c ", ] # Now use fill characters. Stress the system using a fill # character that is the same as an align character. 
t = simple_table(2, kinds="iS") assert t.pformat(align="^^") == [ " a b ", "--- ---", "^1^ ^b^", "^2^ ^c^", ] assert t.pformat(align="^>") == [ " a b ", "--- ---", "^^1 ^^b", "^^2 ^^c", ] assert t.pformat(align="^<") == [ " a b ", "--- ---", "1^^ b^^", "2^^ c^^", ] # Complicated interaction (same as narrative docs example) t1 = Table([[1.0, 2.0], [1, 2]], names=["column1", "column2"]) t1["column1"].format = "#^.2f" assert t1.pformat() == [ "column1 column2", "------- -------", "##1.00# 1", "##2.00# 2", ] assert t1.pformat(align="!<") == [ "column1 column2", "------- -------", "1.00!!! 1!!!!!!", "2.00!!! 2!!!!!!", ] assert t1.pformat(align=[None, "!<"]) == [ "column1 column2", "------- -------", "##1.00# 1!!!!!!", "##2.00# 2!!!!!!", ] # Zero fill t["a"].format = "+d" assert t.pformat(align="0=") == [ " a b ", "--- ---", "+01 00b", "+02 00c", ] with pytest.raises(ValueError): t.pformat(align=["fail"]) with pytest.raises(TypeError): t.pformat(align=0) with pytest.raises(TypeError): t.pprint(align=0) # Make sure pprint() does not raise an exception t.pprint() with pytest.raises(ValueError): t.pprint(align=["<", "<", "<"]) with pytest.raises(ValueError): t.pprint(align="x=") def test_auto_format_func(): """Test for #5802 (fix for #5800 where format_func key is not unique)""" t = Table([[1, 2] * u.m]) t["col0"].format = "%f" t.pformat() # Force caching of format function qt = QTable(t) qt.pformat() # Generates exception prior to #5802 def test_decode_replace(): """ Test printing a bytestring column with a value that fails decoding to utf-8 and gets replaced by U+FFFD. See https://docs.python.org/3/library/codecs.html#codecs.replace_errors """ t = Table([[b"Z\xf0"]]) assert t.pformat() == [ "col0", "----", " Z\ufffd", ] class TestColumnsShowHide: """Tests of show and hide table columns""" def setup_method(self): self.t = simple_table(size=1, cols=4, kinds="i") @pytest.mark.parametrize("attr", ("pprint_exclude_names", "pprint_include_names")) def test_basic(self, attr): t = self.t assert ( repr(getattr(Table, attr)) == f"<PprintIncludeExclude name={attr} default=None>" ) t_show_hide = getattr(t, attr) assert repr(t_show_hide) == f"<PprintIncludeExclude name={attr} value=None>" # Default value is None assert t_show_hide() is None def test_slice(self): t = self.t t.pprint_include_names = "a" t.pprint_exclude_names = "b" t2 = t[0:1] assert t2.pprint_include_names() == ("a",) assert t2.pprint_exclude_names() == ("b",) def test_copy(self): t = self.t t.pprint_include_names = "a" t.pprint_exclude_names = "b" t2 = t.copy() assert t2.pprint_include_names() == ("a",) assert t2.pprint_exclude_names() == ("b",) t2.pprint_include_names = "c" t2.pprint_exclude_names = "d" assert t.pprint_include_names() == ("a",) assert t.pprint_exclude_names() == ("b",) assert t2.pprint_include_names() == ("c",) assert t2.pprint_exclude_names() == ("d",) @pytest.mark.parametrize("attr", ("pprint_exclude_names", "pprint_include_names")) @pytest.mark.parametrize("value", ("z", ["a", "z"])) def test_setting(self, attr, value): t = self.t t_show_hide = getattr(t, attr) # Expected attribute value ('z',) or ('a', 'z') exp = (value,) if isinstance(value, str) else tuple(value) # Context manager, can include column names that do not exist with t_show_hide.set(value): assert t_show_hide() == exp assert t.meta["__attributes__"] == {attr: exp} assert t_show_hide() is None # Setting back to None clears out meta assert t.meta == {} # Do `t.pprint_include_names/hide = value` setattr(t, attr, value) assert t_show_hide() == exp # Clear 
attribute t_show_hide.set(None) assert t_show_hide() is None # Now use set() method t_show_hide.set(value) assert t_show_hide() == exp with t_show_hide.set(None): assert t_show_hide() is None assert t.meta == {} assert t_show_hide() == exp @pytest.mark.parametrize("attr", ("pprint_exclude_names", "pprint_include_names")) @pytest.mark.parametrize("value", ("z", ["a", "z"], ("a", "z"))) def test_add_remove(self, attr, value): t = self.t t_show_hide = getattr(t, attr) # Expected attribute value ('z') or ('a', 'z') exp = (value,) if isinstance(value, str) else tuple(value) # add() method for str or list of str t_show_hide.add(value) assert t_show_hide() == exp # Adding twice has no effect t_show_hide.add(value) assert t_show_hide() == exp # Remove values (str or list of str). Reverts to None if all names are # removed. t_show_hide.remove(value) assert t_show_hide() is None # Remove just one name, possibly leaving a name. t_show_hide.add(value) t_show_hide.remove("z") assert t_show_hide() == (None if value == "z" else ("a",)) # Cannot remove name not in the list t_show_hide.set(["a", "z"]) with pytest.raises(ValueError, match=f"x not in {attr}"): t_show_hide.remove(("x", "z")) @pytest.mark.parametrize("attr", ("pprint_exclude_names", "pprint_include_names")) def test_rename(self, attr): t = self.t t_hide_show = getattr(t, attr) t_hide_show.set(["a", "b"]) t.rename_column("a", "aa") assert t_hide_show() == ("aa", "b") @pytest.mark.parametrize("attr", ("pprint_exclude_names", "pprint_include_names")) def test_remove(self, attr): t = self.t t_hide_show = getattr(t, attr) t_hide_show.set(["a", "b"]) del t["a"] assert t_hide_show() == ("b",) def test_serialization(self): # Serialization works for ECSV. Currently fails for FITS, works with # HDF5. t = self.t t.pprint_exclude_names = ["a", "y"] t.pprint_include_names = ["b", "z"] out = StringIO() ascii.write(t, out, format="ecsv") t2 = ascii.read(out.getvalue(), format="ecsv") assert t2.pprint_exclude_names() == ("a", "y") assert t2.pprint_include_names() == ("b", "z") def test_output(self): """Test that pprint_include/exclude_names actually changes the print output""" t = self.t exp = [ " b d ", "--- ---", " 2 4", ] with t.pprint_exclude_names.set(["a", "c"]): out = t.pformat_all() assert out == exp with t.pprint_include_names.set(["b", "d"]): out = t.pformat_all() assert out == exp with t.pprint_exclude_names.set(["a", "c"]): out = t.pformat_all() assert out == exp with t.pprint_include_names.set(["b", "d"]): out = t.pformat_all() assert out == exp # Mixture (not common in practice but possible). Note, the trailing # backslash instead of parens is needed for Python < 3.9. See: # https://bugs.python.org/issue12782. with t.pprint_include_names.set(["b", "c", "d"]), t.pprint_exclude_names.set( ["c"] ): out = t.pformat_all() assert out == exp def test_output_globs(self): """Test that pprint_include/exclude_names works with globs (fnmatch)""" t = self.t t["a2"] = 1 t["a23"] = 2 # Show only the a* columns exp = [ " a a2 a23", "--- --- ---", " 1 1 2", ] with t.pprint_include_names.set("a*"): out = t.pformat_all() assert out == exp # Show a* but exclude a?? exp = [ " a a2", "--- ---", " 1 1", ] with t.pprint_include_names.set("a*"), t.pprint_exclude_names.set("a??"): out = t.pformat_all() assert out == exp # Exclude a?? 
exp = [ " a b c d a2", "--- --- --- --- ---", " 1 2 3 4 1", ] with t.pprint_exclude_names.set("a??"): out = t.pformat_all() assert out == exp def test_embedded_newline_tab(): """Newlines and tabs are escaped in table repr""" t = Table( rows=[ ["a", "b \n c \t \n d"], ["x", "y\n"], ] ) exp = [ r"col0 col1 ", r"---- --------------", r" a b \n c \t \n d", r" x y\n", ] assert t.pformat_all() == exp def test_multidims_with_zero_dim(): """Test of fix for #13836 when a zero-dim column is present""" t = Table() t["a"] = ["a", "b"] t["b"] = np.ones(shape=(2, 0, 1), dtype=np.float64) exp = [ " a b ", "str1 float64[0,1]", "---- ------------", " a ", " b ", ] assert t.pformat_all(show_dtype=True) == exp
09fd5de1065e3408d221e7c68e08461c9d09d654e3f6878e54277a15db39fe43
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest from astropy.table.bst import BST def get_tree(TreeType): b = TreeType([], []) for val in [5, 2, 9, 3, 4, 1, 6, 10, 8, 7]: b.add(val) return b @pytest.fixture def tree(): return get_tree(BST) r""" 5 / \ 2 9 / \ / \ 1 3 6 10 \ \ 4 8 / 7 """ @pytest.fixture def bst(tree): return tree def test_bst_add(bst): root = bst.root assert root.data == [5] assert root.left.data == [2] assert root.right.data == [9] assert root.left.left.data == [1] assert root.left.right.data == [3] assert root.right.left.data == [6] assert root.right.right.data == [10] assert root.left.right.right.data == [4] assert root.right.left.right.data == [8] assert root.right.left.right.left.data == [7] def test_bst_dimensions(bst): assert bst.size == 10 assert bst.height == 4 def test_bst_find(tree): bst = tree for i in range(1, 11): node = bst.find(i) assert node == [i] assert bst.find(0) == [] assert bst.find(11) == [] assert bst.find("1") == [] def test_bst_traverse(bst): preord = [5, 2, 1, 3, 4, 9, 6, 8, 7, 10] inord = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] postord = [1, 4, 3, 2, 7, 8, 6, 10, 9, 5] traversals = {} for order in ("preorder", "inorder", "postorder"): traversals[order] = [x.key for x in bst.traverse(order)] assert traversals["preorder"] == preord assert traversals["inorder"] == inord assert traversals["postorder"] == postord def test_bst_remove(bst): order = (6, 9, 1, 3, 7, 2, 10, 5, 4, 8) vals = set(range(1, 11)) for i, val in enumerate(order): assert bst.remove(val) is True assert bst.is_valid() assert {x.key for x in bst.traverse("inorder")} == vals.difference( order[: i + 1] ) assert bst.size == 10 - i - 1 assert bst.remove(-val) is False def test_bst_duplicate(bst): bst.add(10, 11) assert bst.find(10) == [10, 11] assert bst.remove(10, data=10) is True assert bst.find(10) == [11] with pytest.raises(ValueError): bst.remove(10, data=30) # invalid data assert bst.remove(10) is True assert bst.remove(10) is False def test_bst_range(tree): bst = tree lst = bst.range_nodes(4, 8) assert sorted(x.key for x in lst) == [4, 5, 6, 7, 8] lst = bst.range_nodes(10, 11) assert [x.key for x in lst] == [10] lst = bst.range_nodes(11, 20) assert len(lst) == 0
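# --- Illustrative sketch (editor's addition): typical use of the BST class
# under test -- build a tree by repeated add(), then run an inclusive range
# query. range_nodes() returns node objects whose `key` attribute holds the
# stored value, matching how the tests above inspect results.
if __name__ == "__main__":
    from astropy.table.bst import BST

    b = BST([], [])
    for val in (5, 2, 9, 3, 7):
        b.add(val)
    keys = sorted(node.key for node in b.range_nodes(3, 7))
    assert keys == [3, 5, 7]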
e5cee6978896e6f0f123b5a4273b22d3fbf2d6e1164edb751bdf2cfd053f506f
from copy import copy import pytest from numpy.testing import assert_equal from astropy.table import Table from astropy.table.mixins.registry import ( MixinRegistryError, _handlers, get_mixin_handler, register_mixin_handler, ) from astropy.table.table_helpers import ArrayWrapper ORIGINAL = {} def setup_function(function): ORIGINAL["handlers"] = copy(_handlers) _handlers.clear() def teardown_function(function): _handlers.clear() _handlers.update(ORIGINAL["handlers"]) class SpamData: pass class SpamWrapper(ArrayWrapper): def __init__(self): super().__init__([0, 1, 3, 4, 5]) FULL_QUALNAME = "astropy.table.mixins.tests.test_registry.SpamData" def handle_spam(obj): return SpamWrapper() def handle_spam_alt(obj): return SpamWrapper() def test_no_handler(): data = SpamData() assert get_mixin_handler(data) is None def test_register_handler(): register_mixin_handler(FULL_QUALNAME, handle_spam) assert get_mixin_handler(SpamData()) is handle_spam def test_register_handler_override(): register_mixin_handler(FULL_QUALNAME, handle_spam) with pytest.raises(MixinRegistryError) as exc: register_mixin_handler(FULL_QUALNAME, handle_spam_alt) assert ( exc.value.args[0] == "Handler for class astropy.table.mixins.tests.test_registry.SpamData is" " already defined" ) register_mixin_handler(FULL_QUALNAME, handle_spam_alt, force=True) assert get_mixin_handler(SpamData()) is handle_spam_alt def test_get_mixin_handler_str(): # Check that we can also pass a fully qualified name to get_mixin_handler register_mixin_handler(FULL_QUALNAME, handle_spam) assert get_mixin_handler(FULL_QUALNAME) is handle_spam def test_add_column(): t = Table() with pytest.raises(TypeError): t["a"] = SpamData() register_mixin_handler(FULL_QUALNAME, handle_spam) t["a"] = SpamData() assert len(t) == 5 assert isinstance(t["a"], SpamWrapper) assert_equal(t["a"].data, [0, 1, 3, 4, 5]) def invalid_handler(obj): return "invalid" def test_invalid_handler(): t = Table() register_mixin_handler(FULL_QUALNAME, invalid_handler) with pytest.raises(TypeError) as exc: t["a"] = SpamData() assert ( exc.value.args[0] == f"Mixin handler for object of type {FULL_QUALNAME} " "did not return a valid mixin column" )
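# --- Illustrative sketch (editor's addition) of the registration flow tested
# above. `PandasLike` and `handle_pandas_like` are hypothetical names; the
# real mechanism is register_mixin_handler(fully_qualified_name, handler),
# after which `t[name] = obj` converts the foreign object via the handler.
if __name__ == "__main__":
    from astropy.table import Table
    from astropy.table.mixins.registry import register_mixin_handler
    from astropy.table.table_helpers import ArrayWrapper

    class PandasLike:
        """Hypothetical third-party container with no astropy support."""

    def handle_pandas_like(obj):
        # A real handler would convert `obj`; here we return fixed data.
        return ArrayWrapper([10, 20, 30])

    register_mixin_handler(
        f"{PandasLike.__module__}.{PandasLike.__qualname__}", handle_pandas_like
    )
    t = Table()
    t["x"] = PandasLike()  # handler is looked up by fully qualified class name
    assert list(t["x"].data) == [10, 20, 30]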
b6d0237cc3462c05483a72c078c72b170250b730e6a5c603a8abbce7c5b428be
import numpy as np import pytest from numpy.testing import assert_equal from astropy.table import Table da = pytest.importorskip("dask.array") class TestDaskHandler: def setup_method(self, method): self.t = Table() self.t["a"] = da.arange(10) def test_add_row(self): self.t.add_row(self.t[0]) assert_equal(self.t["a"].compute(), np.hstack([np.arange(10), 0])) def test_get_column(self): assert isinstance(self.t["a"], da.Array) assert_equal(self.t["a"].compute(), np.arange(10)) def test_slicing_row_single(self): sub = self.t[5] assert isinstance(sub["a"], da.Array) assert not hasattr(sub["a"], "info") # should be a plain dask array assert sub["a"].compute() == 5 def test_slicing_row_range(self): sub = self.t[5:] assert isinstance(sub["a"], da.Array) assert hasattr(sub["a"], "info") # should be a mixin column assert_equal(sub["a"].compute(), np.arange(5, 10)) def test_slicing_column_range(self): sub = self.t[("a",)] assert isinstance(sub["a"], da.Array) assert hasattr(sub["a"], "info") # should be a mixin column assert_equal(sub["a"].compute(), np.arange(10)) def test_pformat(self): assert self.t.pformat_all() == [ " a ", "---", " 0", " 1", " 2", " 3", " 4", " 5", " 6", " 7", " 8", " 9", ] def test_info_preserved(self): self.t["a"].info.description = "A dask column" sub = self.t[1:3] assert sub["a"].info.name == "a" assert sub["a"].info.description == "A dask column" col = self.t["a"].copy() assert col.info.name == "a" assert col.info.description == "A dask column" self.t.add_row(self.t[0]) assert self.t["a"].info.name == "a" assert self.t["a"].info.description == "A dask column"
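# --- Illustrative sketch (editor's addition): the behaviour these tests rely
# on -- a dask array placed in a Table stays lazy (nothing is computed on
# insertion) and only evaluates when .compute() is called.
if __name__ == "__main__":
    import dask.array as da
    import numpy as np
    from astropy.table import Table

    t = Table()
    t["a"] = da.arange(4)  # stored as a lazy mixin column
    assert isinstance(t["a"], da.Array)
    np.testing.assert_array_equal(t["a"].compute(), np.arange(4))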
6729531b0d4eb835caf8b912ef8e49f57020d7d780b1c72fbc3ab64b6be3e924
# Licensed under a 3-clause BSD style license - see LICENSE.rst import itertools from contextlib import nullcontext import numpy as np import pytest from numpy.testing import ( assert_allclose, assert_array_almost_equal_nulp, assert_array_equal, ) from astropy import units as u from astropy.convolution.convolve import convolve, convolve_fft from astropy.utils.exceptions import AstropyUserWarning VALID_DTYPES = (">f4", "<f4", ">f8", "<f8") VALID_DTYPE_MATRIX = list(itertools.product(VALID_DTYPES, VALID_DTYPES)) BOUNDARY_OPTIONS = [None, "fill", "wrap"] NANTREATMENT_OPTIONS = ("interpolate", "fill") NORMALIZE_OPTIONS = [True, False] PRESERVE_NAN_OPTIONS = [True, False] """ What does convolution mean? We use the 'same size' assumption here (i.e., you expect an array of the exact same size as the one you put in) Convolving any array with a kernel that is [1] should result in the same array returned Working example array: [1, 2, 3, 4, 5] Convolved with [1] = [1, 2, 3, 4, 5] Convolved with [1, 1] = [1, 3, 5, 7, 9] THIS IS NOT CONSISTENT! Convolved with [1, 0] = [1, 2, 3, 4, 5] Convolved with [0, 1] = [0, 1, 2, 3, 4] """ # NOTE: use_numpy_fft is redundant if you don't have FFTW installed option_names = ("boundary", "nan_treatment", "normalize_kernel", "dealias") options = list( itertools.product( BOUNDARY_OPTIONS, NANTREATMENT_OPTIONS, (True, False), (True, False) ) ) option_names_preserve_nan = ( "boundary", "nan_treatment", "normalize_kernel", "preserve_nan", ) options_preserve_nan = list( itertools.product( BOUNDARY_OPTIONS, NANTREATMENT_OPTIONS, (True, False), (True, False) ) ) def expected_boundary_warning(boundary=None): # Helper that returns the appropriate context manager for the boundary=None # warning depending on the value of boundary. if boundary is None: ctx = pytest.warns( AstropyUserWarning, match=( "The convolve_fft version of boundary=None " "is equivalent to the convolve boundary='fill'" ), ) else: ctx = nullcontext() return ctx def expected_dealias_error(boundary=None, dealias=False): # Helper that returns the appropriate context manager for the boundary=None # warning depending on the value of boundary. if dealias and boundary == "wrap": ctx = pytest.raises(ValueError) else: ctx = nullcontext() return ctx def assert_floatclose(x, y): """Assert arrays are close to within expected floating point rounding. Check that the result is correct at the precision expected for 64 bit numbers, taking account that the tolerance has to reflect that all powers in the FFTs enter our values. """ # The number used is set by the fact that the Windows FFT sometimes # returns an answer that is EXACTLY 10*np.spacing. 
assert_allclose(x, y, atol=10 * np.spacing(x.max()), rtol=0.0) class TestConvolve1D: @pytest.mark.parametrize(option_names, options) def test_quantity(self, boundary, nan_treatment, normalize_kernel, dealias): """ Test that convolve_fft works correctly when input array is a Quantity """ x = np.array([1.0, 4.0, 5.0, 6.0, 5.0, 7.0, 8.0], dtype="float64") * u.ph y = np.array([0.2, 0.6, 0.2], dtype="float64") with expected_boundary_warning(boundary=boundary): with expected_dealias_error(boundary=boundary, dealias=dealias): z = convolve_fft( x, y, boundary=boundary, nan_treatment=nan_treatment, normalize_kernel=normalize_kernel, dealias=dealias, ) assert x.unit == z.unit @pytest.mark.parametrize(option_names, options) def test_unity_1_none(self, boundary, nan_treatment, normalize_kernel, dealias): """ Test that a unit kernel with a single element returns the same array """ x = np.array([1.0, 2.0, 3.0], dtype="float64") y = np.array([1.0], dtype="float64") with expected_boundary_warning(boundary=boundary): with expected_dealias_error(boundary=boundary, dealias=dealias): z = convolve_fft( x, y, boundary=boundary, nan_treatment=nan_treatment, normalize_kernel=normalize_kernel, dealias=dealias, ) assert_floatclose(z, x) @pytest.mark.parametrize(option_names, options) def test_unity_3(self, boundary, nan_treatment, normalize_kernel, dealias): """ Test that a unit kernel with three elements returns the same array (except when boundary is None). """ x = np.array([1.0, 2.0, 3.0], dtype="float64") y = np.array([0.0, 1.0, 0.0], dtype="float64") with expected_boundary_warning(boundary=boundary): with expected_dealias_error(boundary=boundary, dealias=dealias): z = convolve_fft( x, y, boundary=boundary, nan_treatment=nan_treatment, normalize_kernel=normalize_kernel, dealias=dealias, ) assert_floatclose(z, x) @pytest.mark.parametrize(option_names, options) def test_uniform_3(self, boundary, nan_treatment, normalize_kernel, dealias): """ Test that the different modes are producing the correct results using a uniform kernel with three elements """ x = np.array([1.0, 0.0, 3.0], dtype="float64") y = np.array([1.0, 1.0, 1.0], dtype="float64") with expected_boundary_warning(boundary=boundary): with expected_dealias_error(boundary=boundary, dealias=dealias): z = convolve_fft( x, y, boundary=boundary, nan_treatment=nan_treatment, normalize_kernel=normalize_kernel, dealias=dealias, ) answer_key = (boundary, nan_treatment, normalize_kernel) answer_dict = { "sum_fill_zeros": np.array([1.0, 4.0, 3.0], dtype="float64"), "average_fill_zeros": np.array( [1 / 3.0, 4 / 3.0, 1.0], dtype="float64" ), "sum_wrap": np.array([4.0, 4.0, 4.0], dtype="float64"), "average_wrap": np.array( [4 / 3.0, 4 / 3.0, 4 / 3.0], dtype="float64" ), } result_dict = { # boundary, nan_treatment, normalize_kernel ("fill", "interpolate", True): answer_dict["average_fill_zeros"], ("wrap", "interpolate", True): answer_dict["average_wrap"], ("fill", "interpolate", False): answer_dict["sum_fill_zeros"], ("wrap", "interpolate", False): answer_dict["sum_wrap"], } for k in list(result_dict.keys()): result_dict[(k[0], "fill", k[2])] = result_dict[k] for k in list(result_dict.keys()): if k[0] == "fill": result_dict[(None, k[1], k[2])] = result_dict[k] assert_floatclose(z, result_dict[answer_key]) @pytest.mark.parametrize(option_names, options) def test_halfity_3(self, boundary, nan_treatment, normalize_kernel, dealias): """ Test that the different modes are producing the correct results using a uniform, non-unity kernel with three elements """ x = 
np.array([1.0, 0.0, 3.0], dtype="float64") y = np.array([0.5, 0.5, 0.5], dtype="float64") with expected_boundary_warning(boundary=boundary): with expected_dealias_error(boundary=boundary, dealias=dealias): z = convolve_fft( x, y, boundary=boundary, nan_treatment=nan_treatment, normalize_kernel=normalize_kernel, dealias=dealias, ) answer_dict = { "sum": np.array([0.5, 2.0, 1.5], dtype="float64"), "sum_zeros": np.array([0.5, 2.0, 1.5], dtype="float64"), "sum_nozeros": np.array([0.5, 2.0, 1.5], dtype="float64"), "average": np.array([1 / 3.0, 4 / 3.0, 1.0], dtype="float64"), "sum_wrap": np.array([2.0, 2.0, 2.0], dtype="float64"), "average_wrap": np.array( [4 / 3.0, 4 / 3.0, 4 / 3.0], dtype="float64" ), "average_zeros": np.array([1 / 3.0, 4 / 3.0, 1.0], dtype="float64"), "average_nozeros": np.array([0.5, 4 / 3.0, 1.5], dtype="float64"), } if normalize_kernel: answer_key = "average" else: answer_key = "sum" if boundary == "wrap": answer_key += "_wrap" else: # average = average_zeros; sum = sum_zeros answer_key += "_zeros" assert_floatclose(z, answer_dict[answer_key]) @pytest.mark.parametrize(option_names_preserve_nan, options_preserve_nan) def test_unity_3_withnan( self, boundary, nan_treatment, normalize_kernel, preserve_nan ): """ Test that a unit kernel with three elements returns the same array (except when boundary is None). This version includes a NaN value in the original array. """ x = np.array([1.0, np.nan, 3.0], dtype="float64") y = np.array([0.0, 1.0, 0.0], dtype="float64") with expected_boundary_warning(boundary=boundary): z = convolve_fft( x, y, boundary=boundary, nan_treatment=nan_treatment, normalize_kernel=normalize_kernel, preserve_nan=preserve_nan, ) if preserve_nan: assert np.isnan(z[1]) z = np.nan_to_num(z) assert_floatclose(z, [1.0, 0.0, 3.0]) inputs = ( np.array([1.0, np.nan, 3.0], dtype="float64"), np.array([1.0, np.inf, 3.0], dtype="float64"), ) outputs = ( np.array([1.0, 0.0, 3.0], dtype="float64"), np.array([1.0, 0.0, 3.0], dtype="float64"), ) options_unity1withnan = list( itertools.product( BOUNDARY_OPTIONS, NANTREATMENT_OPTIONS, (True, False), (True, False), inputs, outputs, ) ) @pytest.mark.parametrize( option_names_preserve_nan + ("inval", "outval"), options_unity1withnan ) def test_unity_1_withnan( self, boundary, nan_treatment, normalize_kernel, preserve_nan, inval, outval ): """ Test that a unit kernel with three elements returns the same array (except when boundary is None). This version includes a NaN value in the original array. """ x = inval y = np.array([1.0], dtype="float64") with expected_boundary_warning(boundary=boundary): z = convolve_fft( x, y, boundary=boundary, nan_treatment=nan_treatment, normalize_kernel=normalize_kernel, preserve_nan=preserve_nan, ) if preserve_nan: assert np.isnan(z[1]) z = np.nan_to_num(z) assert_floatclose(z, outval) @pytest.mark.parametrize(option_names_preserve_nan, options_preserve_nan) def test_uniform_3_withnan( self, boundary, nan_treatment, normalize_kernel, preserve_nan ): """ Test that the different modes are producing the correct results using a uniform kernel with three elements. This version includes a NaN value in the original array. 
""" x = np.array([1.0, np.nan, 3.0], dtype="float64") y = np.array([1.0, 1.0, 1.0], dtype="float64") with expected_boundary_warning(boundary=boundary): z = convolve_fft( x, y, boundary=boundary, nan_treatment=nan_treatment, normalize_kernel=normalize_kernel, preserve_nan=preserve_nan, ) if preserve_nan: assert np.isnan(z[1]) answer_dict = { "sum": np.array([1.0, 4.0, 3.0], dtype="float64"), "sum_nozeros": np.array([1.0, 4.0, 3.0], dtype="float64"), "sum_zeros": np.array([1.0, 4.0, 3.0], dtype="float64"), "sum_nozeros_interpnan": np.array([1.0, 4.0, 3.0], dtype="float64"), "average": np.array([1.0, 2.0, 3.0], dtype="float64"), "sum_wrap": np.array([4.0, 4.0, 4.0], dtype="float64"), "average_wrap": np.array([4 / 3.0, 4 / 3.0, 4 / 3.0], dtype="float64"), "average_wrap_interpnan": np.array([2, 2, 2], dtype="float64"), "average_nozeros": np.array([1 / 2.0, 4 / 3.0, 3 / 2.0], dtype="float64"), "average_nozeros_interpnan": np.array([1.0, 2.0, 3.0], dtype="float64"), "average_zeros": np.array([1 / 3.0, 4 / 3.0, 3 / 3.0], dtype="float64"), "average_zeros_interpnan": np.array( [1 / 2.0, 4 / 2.0, 3 / 2.0], dtype="float64" ), } for key in list(answer_dict.keys()): if "sum" in key: answer_dict[key + "_interpnan"] = answer_dict[key] * 3.0 / 2.0 if normalize_kernel: answer_key = "average" else: answer_key = "sum" if boundary == "wrap": answer_key += "_wrap" else: # average = average_zeros; sum = sum_zeros answer_key += "_zeros" if nan_treatment == "interpolate": answer_key += "_interpnan" posns = np.isfinite(z) answer = answer_dict[answer_key][posns] # check that fill is set and that the 1'th position that was originally # NaN is included in the check if (nan_treatment == "fill") and posns[1]: # we fill the center with the sum of the input array divided by # three, since we've now pre-filled the center value with zero answer[1] = 4 / (3.0 if normalize_kernel else 1.0) assert_floatclose(z[posns], answer) def test_nan_interpolate(self): # Test masked array array = np.array([1.0, np.nan, 3.0], dtype="float64") kernel = np.array([1, 1, 1]) masked_array = np.ma.masked_array(array, mask=[0, 1, 0]) result = convolve_fft( masked_array, kernel, boundary="fill", nan_treatment="interpolate", fill_value=np.nan, ) assert_floatclose(result, [1, 2, 3]) def test_nan_fill(self): # regression for #8121 # Test masked array array = np.array([1.0, np.nan, 3.0], dtype="float64") kernel = np.array([1, 1, 1]) result = convolve_fft( array, kernel, boundary="fill", nan_treatment="fill", fill_value=0 ) # note that, because fill_value also affects boundary='fill', the edge # pixels are treated as zero rather than being ignored. assert_floatclose(result, [1 / 3.0, 4 / 3.0, 1.0]) def test_nan_fill_two(self): # regression for #8121 # Test masked array array = np.array([1.0, np.nan, 3.0], dtype="float64") kernel = np.array([1, 1, 1]) result = convolve_fft( array, kernel, boundary="fill", nan_treatment="fill", fill_value=1 ) # note that, because fill_value also affects boundary='fill', the edge # pixels are treated as fill_value=1 rather than being ignored. assert_floatclose(result, [1.0, 5 / 3.0, 5 / 3.0]) def test_masked_array(self): """ Check whether convolve_fft works with masked arrays. 
""" # Test masked array array = np.array([1.0, 2.0, 3.0], dtype="float64") kernel = np.array([1, 1, 1]) masked_array = np.ma.masked_array(array, mask=[0, 1, 0]) result = convolve_fft(masked_array, kernel, boundary="fill", fill_value=0.0) assert_floatclose(result, [1.0 / 2, 2, 3.0 / 2]) # Now test against convolve() convolve_result = convolve( masked_array, kernel, boundary="fill", fill_value=0.0 ) assert_floatclose(convolve_result, result) # Test masked kernel array = np.array([1.0, 2.0, 3.0], dtype="float64") kernel = np.array([1, 1, 1]) masked_kernel = np.ma.masked_array(kernel, mask=[0, 1, 0]) result = convolve_fft(array, masked_kernel, boundary="fill", fill_value=0.0) assert_floatclose(result, [1, 2, 1]) # Now test against convolve() convolve_result = convolve( array, masked_kernel, boundary="fill", fill_value=0.0 ) assert_floatclose(convolve_result, result) def test_normalize_function(self): """ Check if convolve_fft works when passing a normalize function. """ array = [1, 2, 3] kernel = [3, 3, 3] result = convolve_fft(array, kernel, normalize_kernel=np.max) assert_floatclose(result, [3, 6, 5]) @pytest.mark.parametrize(option_names, options) def test_normalization_is_respected( self, boundary, nan_treatment, normalize_kernel, dealias ): """ Check that if normalize_kernel is False then the normalization tolerance is respected. """ array = np.array([1, 2, 3]) # A simple identity kernel to which a non-zero normalization is added. base_kernel = np.array([1.0]) # Use the same normalization error tolerance in all cases. normalization_rtol = 1e-4 # Add the error below to the kernel. norm_error = [normalization_rtol / 10, normalization_rtol * 10] for err in norm_error: kernel = base_kernel + err result = convolve_fft( array, kernel, normalize_kernel=normalize_kernel, nan_treatment=nan_treatment, normalization_zero_tol=normalization_rtol, ) if normalize_kernel: # Kernel has been normalized to 1. assert_floatclose(result, array) else: # Kernel should not have been normalized... assert_floatclose(result, array * kernel) class TestConvolve2D: @pytest.mark.parametrize(option_names, options) def test_unity_1x1_none(self, boundary, nan_treatment, normalize_kernel, dealias): """ Test that a 1x1 unit kernel returns the same array """ x = np.array( [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], dtype="float64" ) y = np.array([[1.0]], dtype="float64") with expected_boundary_warning(boundary=boundary): with expected_dealias_error(boundary=boundary, dealias=dealias): z = convolve_fft( x, y, boundary=boundary, nan_treatment=nan_treatment, normalize_kernel=normalize_kernel, dealias=dealias, ) assert_floatclose(z, x) @pytest.mark.parametrize(option_names, options) def test_unity_3x3(self, boundary, nan_treatment, normalize_kernel, dealias): """ Test that a 3x3 unit kernel returns the same array (except when boundary is None). """ x = np.array( [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], dtype="float64" ) y = np.array( [[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]], dtype="float64" ) with expected_boundary_warning(boundary=boundary): with expected_dealias_error(boundary=boundary, dealias=dealias): z = convolve_fft( x, y, boundary=boundary, nan_treatment=nan_treatment, normalize_kernel=normalize_kernel, dealias=dealias, ) assert_floatclose(z, x) @pytest.mark.parametrize(option_names, options) def test_uniform_3x3(self, boundary, nan_treatment, normalize_kernel, dealias): """ Test that the different modes are producing the correct results using a 3x3 uniform kernel. 
""" x = np.array( [[0.0, 0.0, 3.0], [1.0, 0.0, 0.0], [0.0, 2.0, 0.0]], dtype="float64" ) y = np.array( [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], dtype="float64" ) with expected_boundary_warning(boundary=boundary): with expected_dealias_error(boundary=boundary, dealias=dealias): z = convolve_fft( x, y, boundary=boundary, nan_treatment=nan_treatment, fill_value=np.nan if normalize_kernel else 0, normalize_kernel=normalize_kernel, dealias=dealias, ) w = np.array( [[4.0, 6.0, 4.0], [6.0, 9.0, 6.0], [4.0, 6.0, 4.0]], dtype="float64" ) answer_dict = { "sum": np.array( [[1.0, 4.0, 3.0], [3.0, 6.0, 5.0], [3.0, 3.0, 2.0]], dtype="float64", ), "sum_wrap": np.array( [[6.0, 6.0, 6.0], [6.0, 6.0, 6.0], [6.0, 6.0, 6.0]], dtype="float64", ), } answer_dict["average"] = answer_dict["sum"] / w answer_dict["average_wrap"] = answer_dict["sum_wrap"] / 9.0 answer_dict["average_withzeros"] = answer_dict["sum"] / 9.0 answer_dict["sum_withzeros"] = answer_dict["sum"] if normalize_kernel: answer_key = "average" else: answer_key = "sum" if boundary == "wrap": answer_key += "_wrap" elif nan_treatment == "fill": answer_key += "_withzeros" a = answer_dict[answer_key] assert_floatclose(z, a) @pytest.mark.parametrize(option_names_preserve_nan, options_preserve_nan) def test_unity_3x3_withnan( self, boundary, nan_treatment, normalize_kernel, preserve_nan ): """ Test that a 3x3 unit kernel returns the same array (except when boundary is None). This version includes a NaN value in the original array. """ x = np.array( [[1.0, 2.0, 3.0], [4.0, np.nan, 6.0], [7.0, 8.0, 9.0]], dtype="float64" ) y = np.array( [[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]], dtype="float64" ) with expected_boundary_warning(boundary=boundary): z = convolve_fft( x, y, boundary=boundary, nan_treatment=nan_treatment, normalize_kernel=normalize_kernel, preserve_nan=preserve_nan, ) if preserve_nan: assert np.isnan(z[1, 1]) z = np.nan_to_num(z) x = np.nan_to_num(x) assert_floatclose(z, x) @pytest.mark.parametrize(option_names_preserve_nan, options_preserve_nan) def test_uniform_3x3_withnan( self, boundary, nan_treatment, normalize_kernel, preserve_nan ): """ Test that the different modes are producing the correct results using a 3x3 uniform kernel. This version includes a NaN value in the original array. 
""" x = np.array( [[0.0, 0.0, 3.0], [1.0, np.nan, 0.0], [0.0, 2.0, 0.0]], dtype="float64" ) y = np.array( [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], dtype="float64" ) # commented out: allow unnormalized nan-ignoring convolution # # kernel is not normalized, so this situation -> exception # if nan_treatment and not normalize_kernel: # with pytest.raises(ValueError): # z = convolve_fft(x, y, boundary=boundary, # nan_treatment=nan_treatment, # normalize_kernel=normalize_kernel, # ignore_edge_zeros=ignore_edge_zeros, # ) # return with expected_boundary_warning(boundary=boundary): z = convolve_fft( x, y, boundary=boundary, nan_treatment=nan_treatment, # you cannot fill w/nan, you can only interpolate over it fill_value=( np.nan if normalize_kernel and nan_treatment == "interpolate" else 0 ), normalize_kernel=normalize_kernel, preserve_nan=preserve_nan, ) if preserve_nan: assert np.isnan(z[1, 1]) # weights w_n = np.array( [[3.0, 5.0, 3.0], [5.0, 8.0, 5.0], [3.0, 5.0, 3.0]], dtype="float64" ) w_z = np.array( [[4.0, 6.0, 4.0], [6.0, 9.0, 6.0], [4.0, 6.0, 4.0]], dtype="float64" ) answer_dict = { "sum": np.array( [[1.0, 4.0, 3.0], [3.0, 6.0, 5.0], [3.0, 3.0, 2.0]], dtype="float64" ), "sum_wrap": np.array( [[6.0, 6.0, 6.0], [6.0, 6.0, 6.0], [6.0, 6.0, 6.0]], dtype="float64" ), } answer_dict["average"] = answer_dict["sum"] / w_z answer_dict["average_interpnan"] = answer_dict["sum"] / w_n answer_dict["average_wrap_interpnan"] = answer_dict["sum_wrap"] / 8.0 answer_dict["average_wrap"] = answer_dict["sum_wrap"] / 9.0 answer_dict["average_withzeros"] = answer_dict["sum"] / 9.0 answer_dict["average_withzeros_interpnan"] = answer_dict["sum"] / 8.0 answer_dict["sum_withzeros"] = answer_dict["sum"] answer_dict["sum_interpnan"] = answer_dict["sum"] * 9 / 8.0 answer_dict["sum_withzeros_interpnan"] = answer_dict["sum"] answer_dict["sum_wrap_interpnan"] = answer_dict["sum_wrap"] * 9 / 8.0 if normalize_kernel: answer_key = "average" else: answer_key = "sum" if boundary == "wrap": answer_key += "_wrap" elif nan_treatment == "fill": answer_key += "_withzeros" if nan_treatment == "interpolate": answer_key += "_interpnan" answer_dict[answer_key] # Skip the NaN at [1, 1] when preserve_nan=True posns = np.where(np.isfinite(z)) # for reasons unknown, the Windows FFT returns an answer for the [0, 0] # component that is EXACTLY 10*np.spacing assert_floatclose(z[posns], z[posns]) def test_big_fail(self): """ Test that convolve_fft raises an exception if a too-large array is passed in. """ with pytest.raises((ValueError, MemoryError)): # while a good idea, this approach did not work; it actually writes to disk # arr = np.memmap('file.np', mode='w+', shape=(512, 512, 512), dtype=complex) # this just allocates the memory but never touches it; it's better: arr = np.empty([512, 512, 512], dtype=complex) # note 512**3 * 16 bytes = 2.0 GB convolve_fft(arr, arr) def test_padding(self): """ Test that convolve_fft pads to _next_fast_lengths and does not expand all dimensions to length of longest side (#11242/#10047). 
""" # old implementation expanded this to up to 2048**3 shape = (1, 1226, 518) img = np.zeros(shape, dtype="float64") img[0, 600:610, 300:304] = 1.0 kernel = np.zeros((1, 7, 7), dtype="float64") kernel[0, 3, 3] = 1.0 with pytest.warns( AstropyUserWarning, match="psf_pad was set to False, which overrides the boundary='fill'", ): img_fft = convolve_fft( img, kernel, return_fft=True, psf_pad=False, fft_pad=False ) assert_array_equal(img_fft.shape, shape) img_fft = convolve_fft( img, kernel, return_fft=True, psf_pad=False, fft_pad=True ) # should be from either hardcoded _good_sizes[] or scipy.fft.next_fast_len() assert img_fft.shape in ((1, 1250, 540), (1, 1232, 525)) img_fft = convolve_fft( img, kernel, return_fft=True, psf_pad=True, fft_pad=False ) assert_array_equal(img_fft.shape, np.array(shape) + np.array(kernel.shape)) img_fft = convolve_fft(img, kernel, return_fft=True, psf_pad=True, fft_pad=True) assert img_fft.shape in ((2, 1250, 540), (2, 1250, 525)) @pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS) def test_non_normalized_kernel(self, boundary): x = np.array([[0.0, 0.0, 4.0], [1.0, 2.0, 0.0], [0.0, 3.0, 0.0]], dtype="float") y = np.array( [[1.0, -1.0, 1.0], [-1.0, 0.0, -1.0], [1.0, -1.0, 1.0]], dtype="float" ) with expected_boundary_warning(boundary=boundary): z = convolve_fft( x, y, boundary=boundary, nan_treatment="fill", normalize_kernel=False ) if boundary in (None, "fill"): assert_floatclose( z, np.array( [[1.0, -5.0, 2.0], [1.0, 0.0, -3.0], [-2.0, -1.0, -1.0]], dtype="float", ), ) elif boundary == "wrap": assert_floatclose( z, np.array( [[0.0, -8.0, 6.0], [5.0, 0.0, -4.0], [2.0, 3.0, -4.0]], dtype="float", ), ) else: raise ValueError("Invalid boundary specification") @pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS) def test_asymmetric_kernel(boundary): """ Make sure that asymmetric convolution functions go the right direction """ x = np.array([3.0, 0.0, 1.0], dtype=">f8") y = np.array([1, 2, 3], dtype=">f8") with expected_boundary_warning(boundary=boundary): z = convolve_fft(x, y, boundary=boundary, normalize_kernel=False) if boundary in (None, "fill"): assert_array_almost_equal_nulp(z, np.array([6.0, 10.0, 2.0], dtype="float"), 10) elif boundary == "wrap": assert_array_almost_equal_nulp(z, np.array([9.0, 10.0, 5.0], dtype="float"), 10) @pytest.mark.parametrize( ("boundary", "nan_treatment", "normalize_kernel", "preserve_nan", "dtype"), itertools.product( BOUNDARY_OPTIONS, NANTREATMENT_OPTIONS, NORMALIZE_OPTIONS, PRESERVE_NAN_OPTIONS, VALID_DTYPES, ), ) def test_input_unmodified( boundary, nan_treatment, normalize_kernel, preserve_nan, dtype ): """ Test that convolve_fft works correctly when inputs are lists """ array = [1.0, 4.0, 5.0, 6.0, 5.0, 7.0, 8.0] kernel = [0.2, 0.6, 0.2] x = np.array(array, dtype=dtype) y = np.array(kernel, dtype=dtype) # Make pseudoimmutable x.flags.writeable = False y.flags.writeable = False with expected_boundary_warning(boundary=boundary): convolve_fft( x, y, boundary=boundary, nan_treatment=nan_treatment, normalize_kernel=normalize_kernel, preserve_nan=preserve_nan, ) assert np.all(np.array(array, dtype=dtype) == x) assert np.all(np.array(kernel, dtype=dtype) == y) @pytest.mark.parametrize( ("boundary", "nan_treatment", "normalize_kernel", "preserve_nan", "dtype"), itertools.product( BOUNDARY_OPTIONS, NANTREATMENT_OPTIONS, NORMALIZE_OPTIONS, PRESERVE_NAN_OPTIONS, VALID_DTYPES, ), ) def test_input_unmodified_with_nan( boundary, nan_treatment, normalize_kernel, preserve_nan, dtype ): """ Test that convolve_fft doesn't modify the input 
data """ array = [1.0, 4.0, 5.0, np.nan, 5.0, 7.0, 8.0] kernel = [0.2, 0.6, 0.2] x = np.array(array, dtype=dtype) y = np.array(kernel, dtype=dtype) # Make pseudoimmutable x.flags.writeable = False y.flags.writeable = False # make copies for post call comparison x_copy = x.copy() y_copy = y.copy() with expected_boundary_warning(boundary=boundary): convolve_fft( x, y, boundary=boundary, nan_treatment=nan_treatment, normalize_kernel=normalize_kernel, preserve_nan=preserve_nan, ) # ( NaN == NaN ) = False # Only compare non NaN values for canonical equivalence # and then check NaN explicitly with np.isnan() array_is_nan = np.isnan(array) kernel_is_nan = np.isnan(kernel) array_not_nan = ~array_is_nan kernel_not_nan = ~kernel_is_nan assert np.all(x_copy[array_not_nan] == x[array_not_nan]) assert np.all(y_copy[kernel_not_nan] == y[kernel_not_nan]) assert np.all(np.isnan(x[array_is_nan])) assert np.all(np.isnan(y[kernel_is_nan])) @pytest.mark.parametrize( "error_kwarg", [{"psf_pad": True}, {"fft_pad": True}, {"dealias": True}] ) def test_convolve_fft_boundary_wrap_error(error_kwarg): x = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], dtype=">f8") y = np.array([[1.0]], dtype=">f8") assert (convolve_fft(x, y, boundary="wrap") == x).all() with pytest.raises(ValueError) as err: convolve_fft(x, y, boundary="wrap", **error_kwarg) assert ( str(err.value) == f"With boundary='wrap', {list(error_kwarg.keys())[0]} cannot be enabled." ) def test_convolve_fft_boundary_extend_error(): x = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], dtype=">f8") y = np.array([[1.0]], dtype=">f8") with pytest.raises( NotImplementedError, match=r"The 'extend' option is not implemented for fft-based convolution", ): convolve_fft(x, y, boundary="extend")
e907bc0b35c534bd9e91df35d41858995051e19f23a0f1a0da039575258703ed
# Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np import pytest from astropy import convolution as conv from astropy.tests.helper import check_pickling_recovery, pickle_protocol # noqa: F401 @pytest.mark.parametrize( ("name", "args", "kwargs", "xfail"), [ (conv.CustomKernel, [], {"array": np.random.rand(15)}, False), (conv.Gaussian1DKernel, [1.0], {"x_size": 5}, True), (conv.Gaussian2DKernel, [1.0], {"x_size": 5, "y_size": 5}, True), ], ) def test_simple_object(pickle_protocol, name, args, kwargs, xfail): # noqa: F811 # Tests easily instantiated objects if xfail: pytest.xfail() original = name(*args, **kwargs) check_pickling_recovery(original, pickle_protocol)
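# --- Illustrative sketch (editor's addition): the round trip that
# check_pickling_recovery() automates, spelled out for a CustomKernel, the one
# parametrized case above that is expected to succeed.
if __name__ == "__main__":
    import pickle

    kernel = conv.CustomKernel(np.ones(5) / 5.0)
    restored = pickle.loads(pickle.dumps(kernel))
    np.testing.assert_array_equal(restored.array, kernel.array)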
ab063765f755b9858e04ea554a5aa508497c37d5b33287318d1ca7943e0afb90
# Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np import pytest from astropy.convolution.convolve import convolve, convolve_fft from astropy.convolution.kernels import Gaussian2DKernel from astropy.nddata import NDData def test_basic_nddata(): arr = np.zeros((11, 11)) arr[5, 5] = 1 ndd = NDData(arr) test_kernel = Gaussian2DKernel(1) result = convolve(ndd, test_kernel) x, y = np.mgrid[:11, :11] expected = result[5, 5] * np.exp(-0.5 * ((x - 5) ** 2 + (y - 5) ** 2)) np.testing.assert_allclose(result, expected, atol=1e-6) resultf = convolve_fft(ndd, test_kernel) np.testing.assert_allclose(resultf, expected, atol=1e-6) @pytest.mark.parametrize( "convfunc", [ lambda *args: convolve( *args, nan_treatment="interpolate", normalize_kernel=True ), lambda *args: convolve_fft( *args, nan_treatment="interpolate", normalize_kernel=True ), ], ) def test_masked_nddata(convfunc): arr = np.zeros((11, 11)) arr[4, 5] = arr[6, 5] = arr[5, 4] = arr[5, 6] = 0.2 arr[5, 5] = 1.5 ndd_base = NDData(arr) mask = arr < 0 # this is all False mask[5, 5] = True ndd_mask = NDData(arr, mask=mask) arrnan = arr.copy() arrnan[5, 5] = np.nan ndd_nan = NDData(arrnan) test_kernel = Gaussian2DKernel(1) result_base = convfunc(ndd_base, test_kernel) result_nan = convfunc(ndd_nan, test_kernel) result_mask = convfunc(ndd_mask, test_kernel) assert np.allclose(result_nan, result_mask) assert not np.allclose(result_base, result_mask) assert not np.allclose(result_base, result_nan) # check to make sure the mask run doesn't talk back to the initial array assert np.sum(np.isnan(ndd_base.data)) != np.sum(np.isnan(ndd_nan.data))
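

# Usage sketch (not a test): the equivalence exercised above -- an NDData
# mask being treated like NaNs during convolution -- in its simplest form:
def _mask_vs_nan_sketch():
    data = np.ones((11, 11))
    mask = np.zeros((11, 11), dtype=bool)
    mask[5, 5] = True  # hide the centre pixel

    via_mask = convolve(
        NDData(data, mask=mask),
        Gaussian2DKernel(1),
        nan_treatment="interpolate",
        normalize_kernel=True,
    )

    nan_data = data.copy()
    nan_data[5, 5] = np.nan
    via_nan = convolve(
        nan_data,
        Gaussian2DKernel(1),
        nan_treatment="interpolate",
        normalize_kernel=True,
    )

    assert np.allclose(via_mask, via_nan)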
4bed9fde54bb39ced7b3f2ca39130bb9c5014d2609621b53631396649b6931ed
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import itertools

import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_almost_equal

from astropy import units as u
from astropy.convolution.convolve import convolve, convolve_fft
from astropy.convolution.kernels import (
    Box2DKernel,
    Gaussian2DKernel,
    Moffat2DKernel,
    Tophat2DKernel,
)

SHAPES_ODD = [[15, 15], [31, 31]]
SHAPES_EVEN = [[8, 8], [16, 16], [32, 32]]  # FIXME: not used ?!
NOSHAPE = [[None, None]]
WIDTHS = [2, 3, 4, 5]

KERNELS = []

for shape in SHAPES_ODD + NOSHAPE:
    for width in WIDTHS:
        KERNELS.append(
            Gaussian2DKernel(
                x_stddev=width,
                x_size=shape[0],
                y_size=shape[1],
                mode="oversample",
                factor=10,
            )
        )
        KERNELS.append(
            Box2DKernel(
                width=width,
                x_size=shape[0],
                y_size=shape[1],
                mode="oversample",
                factor=10,
            )
        )
        KERNELS.append(
            Tophat2DKernel(
                radius=width,
                x_size=shape[0],
                y_size=shape[1],
                mode="oversample",
                factor=10,
            )
        )
        KERNELS.append(
            Moffat2DKernel(
                gamma=width,
                alpha=2,
                x_size=shape[0],
                y_size=shape[1],
                mode="oversample",
                factor=10,
            )
        )


class Test2DConvolutions:
    @pytest.mark.parametrize("kernel", KERNELS)
    def test_centered_makekernel(self, kernel):
        """
        Test smoothing of an image with a single positive pixel
        """
        shape = kernel.array.shape

        x = np.zeros(shape)
        xslice = tuple(slice(sh // 2, sh // 2 + 1) for sh in shape)
        x[xslice] = 1.0

        c2 = convolve_fft(x, kernel, boundary="fill")
        c1 = convolve(x, kernel, boundary="fill")

        assert_almost_equal(c1, c2, decimal=12)

    @pytest.mark.parametrize("kernel", KERNELS)
    def test_random_makekernel(self, kernel):
        """
        Test smoothing of an image made of random noise
        """
        shape = kernel.array.shape

        x = np.random.randn(*shape)

        c2 = convolve_fft(x, kernel, boundary="fill")
        c1 = convolve(x, kernel, boundary="fill")

        # not clear why, but these differ by a couple ulps...
        assert_almost_equal(c1, c2, decimal=12)

    @pytest.mark.parametrize(
        ("shape", "width"), list(itertools.product(SHAPES_ODD, WIDTHS))
    )
    def test_uniform_smallkernel(self, shape, width):
        """
        Test smoothing of an image with a single positive pixel

        Uses a simple, small kernel
        """
        if width % 2 == 0:
            # convolve does not accept even-shape kernels
            return

        kernel = np.ones([width, width])

        x = np.zeros(shape)
        xslice = tuple(slice(sh // 2, sh // 2 + 1) for sh in shape)
        x[xslice] = 1.0

        c2 = convolve_fft(x, kernel, boundary="fill")
        c1 = convolve(x, kernel, boundary="fill")

        assert_almost_equal(c1, c2, decimal=12)

    @pytest.mark.parametrize(
        ("shape", "width"), list(itertools.product(SHAPES_ODD, [1, 3, 5]))
    )
    def test_smallkernel_Box2DKernel(self, shape, width):
        """
        Test smoothing of an image with a single positive pixel

        Compares a small uniform kernel to the Box2DKernel
        """
        kernel1 = np.ones([width, width]) / float(width) ** 2
        kernel2 = Box2DKernel(width, mode="oversample", factor=10)

        x = np.zeros(shape)
        xslice = tuple(slice(sh // 2, sh // 2 + 1) for sh in shape)
        x[xslice] = 1.0

        c2 = convolve_fft(x, kernel2, boundary="fill")
        c1 = convolve_fft(x, kernel1, boundary="fill")

        assert_almost_equal(c1, c2, decimal=12)

        c2 = convolve(x, kernel2, boundary="fill")
        c1 = convolve(x, kernel1, boundary="fill")

        assert_almost_equal(c1, c2, decimal=12)


def test_gaussian_2d_kernel_quantity():
    # Make sure that the angle can be a quantity
    kernel1 = Gaussian2DKernel(x_stddev=2, y_stddev=4, theta=45 * u.deg)
    kernel2 = Gaussian2DKernel(x_stddev=2, y_stddev=4, theta=np.pi / 4)
    assert_allclose(kernel1.array, kernel2.array)
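

# Quick demonstration (not a test) of the property checked throughout this
# file: for well-behaved inputs the direct and FFT implementations agree to
# near machine precision.
def _direct_vs_fft_sketch():
    rng = np.random.default_rng(0)
    image = rng.standard_normal((31, 31))
    kernel = Gaussian2DKernel(2, x_size=15, y_size=15)

    direct = convolve(image, kernel, boundary="fill")
    fourier = convolve_fft(image, kernel, boundary="fill")
    assert np.allclose(direct, fourier, atol=1e-10)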
d8b67047eaa34020d6ebd23c76c07c4af020abc41ddbfdca28f87bc72799237c
# Licensed under a 3-clause BSD style license - see LICENSE.rst import itertools import numpy as np import pytest from numpy.testing import assert_allclose, assert_almost_equal from astropy.convolution.convolve import convolve, convolve_fft from astropy.convolution.kernels import ( AiryDisk2DKernel, Box1DKernel, Box2DKernel, CustomKernel, Gaussian1DKernel, Gaussian2DKernel, Kernel1D, Kernel2D, Model1DKernel, Model2DKernel, RickerWavelet1DKernel, RickerWavelet2DKernel, Ring2DKernel, Tophat2DKernel, Trapezoid1DKernel, TrapezoidDisk2DKernel, ) from astropy.convolution.utils import KernelSizeError from astropy.modeling.models import Box2D, Gaussian1D, Gaussian2D from astropy.utils.compat.optional_deps import HAS_SCIPY from astropy.utils.exceptions import AstropyUserWarning WIDTHS_ODD = [3, 5, 7, 9] WIDTHS_EVEN = [2, 4, 8, 16] MODES = ["center", "linear_interp", "oversample", "integrate"] KERNEL_TYPES = [ Gaussian1DKernel, Gaussian2DKernel, Box1DKernel, Box2DKernel, Trapezoid1DKernel, TrapezoidDisk2DKernel, RickerWavelet1DKernel, Tophat2DKernel, AiryDisk2DKernel, Ring2DKernel, ] NUMS = [1, 1.0, np.float32(1.0), np.float64(1.0)] # Test data delta_pulse_1D = np.zeros(81) delta_pulse_1D[40] = 1 delta_pulse_2D = np.zeros((81, 81)) delta_pulse_2D[40, 40] = 1 random_data_1D = np.random.rand(61) random_data_2D = np.random.rand(61, 61) class TestKernels: """ Test class for the built-in convolution kernels. """ @pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy") @pytest.mark.parametrize("width", WIDTHS_ODD) def test_scipy_filter_gaussian(self, width): """ Test GaussianKernel against SciPy ndimage gaussian filter. """ from scipy.ndimage import gaussian_filter gauss_kernel_1D = Gaussian1DKernel(width) gauss_kernel_1D.normalize() gauss_kernel_2D = Gaussian2DKernel(width) gauss_kernel_2D.normalize() astropy_1D = convolve(delta_pulse_1D, gauss_kernel_1D, boundary="fill") astropy_2D = convolve(delta_pulse_2D, gauss_kernel_2D, boundary="fill") scipy_1D = gaussian_filter(delta_pulse_1D, width) scipy_2D = gaussian_filter(delta_pulse_2D, width) assert_almost_equal(astropy_1D, scipy_1D, decimal=12) assert_almost_equal(astropy_2D, scipy_2D, decimal=12) @pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy") @pytest.mark.parametrize("width", WIDTHS_ODD) def test_scipy_filter_gaussian_laplace(self, width): """ Test RickerWavelet kernels against SciPy ndimage gaussian laplace filters. """ from scipy.ndimage import gaussian_laplace ricker_kernel_1D = RickerWavelet1DKernel(width) ricker_kernel_2D = RickerWavelet2DKernel(width) astropy_1D = convolve( delta_pulse_1D, ricker_kernel_1D, boundary="fill", normalize_kernel=False ) astropy_2D = convolve( delta_pulse_2D, ricker_kernel_2D, boundary="fill", normalize_kernel=False ) MESSAGE = r"sum is close to zero" with pytest.raises(Exception, match=MESSAGE): astropy_1D = convolve( delta_pulse_1D, ricker_kernel_1D, boundary="fill", normalize_kernel=True ) with pytest.raises(Exception, match=MESSAGE): astropy_2D = convolve( delta_pulse_2D, ricker_kernel_2D, boundary="fill", normalize_kernel=True ) # The Laplace of Gaussian filter is an inverted Ricker Wavelet filter. scipy_1D = -gaussian_laplace(delta_pulse_1D, width) scipy_2D = -gaussian_laplace(delta_pulse_2D, width) # There is a slight deviation in the normalization. They differ by a # factor of ~1.0000284132604045. The reason is not known. 
assert_almost_equal(astropy_1D, scipy_1D, decimal=5) assert_almost_equal(astropy_2D, scipy_2D, decimal=5) @pytest.mark.parametrize( ("kernel_type", "width"), list(itertools.product(KERNEL_TYPES, WIDTHS_ODD)) ) def test_delta_data(self, kernel_type, width): """ Test smoothing of an image with a single positive pixel """ if kernel_type == AiryDisk2DKernel and not HAS_SCIPY: pytest.skip("Omitting AiryDisk2DKernel, which requires SciPy") if not kernel_type == Ring2DKernel: kernel = kernel_type(width) else: kernel = kernel_type(width, width * 0.2) if kernel.dimension == 1: c1 = convolve_fft( delta_pulse_1D, kernel, boundary="fill", normalize_kernel=False ) c2 = convolve( delta_pulse_1D, kernel, boundary="fill", normalize_kernel=False ) assert_almost_equal(c1, c2, decimal=12) else: c1 = convolve_fft( delta_pulse_2D, kernel, boundary="fill", normalize_kernel=False ) c2 = convolve( delta_pulse_2D, kernel, boundary="fill", normalize_kernel=False ) assert_almost_equal(c1, c2, decimal=12) @pytest.mark.parametrize( ("kernel_type", "width"), list(itertools.product(KERNEL_TYPES, WIDTHS_ODD)) ) def test_random_data(self, kernel_type, width): """ Test smoothing of an image made of random noise """ if kernel_type == AiryDisk2DKernel and not HAS_SCIPY: pytest.skip("Omitting AiryDisk2DKernel, which requires SciPy") if not kernel_type == Ring2DKernel: kernel = kernel_type(width) else: kernel = kernel_type(width, width * 0.2) if kernel.dimension == 1: c1 = convolve_fft( random_data_1D, kernel, boundary="fill", normalize_kernel=False ) c2 = convolve( random_data_1D, kernel, boundary="fill", normalize_kernel=False ) assert_almost_equal(c1, c2, decimal=12) else: c1 = convolve_fft( random_data_2D, kernel, boundary="fill", normalize_kernel=False ) c2 = convolve( random_data_2D, kernel, boundary="fill", normalize_kernel=False ) assert_almost_equal(c1, c2, decimal=12) @pytest.mark.parametrize("width", WIDTHS_ODD) def test_uniform_smallkernel(self, width): """ Test smoothing of an image with a single positive pixel Instead of using kernel class, uses a simple, small kernel """ kernel = np.ones([width, width]) c2 = convolve_fft(delta_pulse_2D, kernel, boundary="fill") c1 = convolve(delta_pulse_2D, kernel, boundary="fill") assert_almost_equal(c1, c2, decimal=12) @pytest.mark.parametrize("width", WIDTHS_ODD) def test_smallkernel_vs_Box2DKernel(self, width): """ Test smoothing of an image with a single positive pixel """ kernel1 = np.ones([width, width]) / width**2 kernel2 = Box2DKernel(width) c2 = convolve_fft(delta_pulse_2D, kernel2, boundary="fill") c1 = convolve_fft(delta_pulse_2D, kernel1, boundary="fill") assert_almost_equal(c1, c2, decimal=12) def test_convolve_1D_kernels(self): """ Check if convolving two kernels with each other works correctly. """ gauss_1 = Gaussian1DKernel(3) gauss_2 = Gaussian1DKernel(4) test_gauss_3 = Gaussian1DKernel(5) with pytest.warns( AstropyUserWarning, match=r"Both array and kernel " r"are Kernel instances" ): gauss_3 = convolve(gauss_1, gauss_2) assert np.all(np.abs((gauss_3 - test_gauss_3).array) < 0.01) def test_convolve_2D_kernels(self): """ Check if convolving two kernels with each other works correctly. 
""" gauss_1 = Gaussian2DKernel(3) gauss_2 = Gaussian2DKernel(4) test_gauss_3 = Gaussian2DKernel(5) with pytest.warns( AstropyUserWarning, match=r"Both array and kernel " r"are Kernel instances" ): gauss_3 = convolve(gauss_1, gauss_2) assert np.all(np.abs((gauss_3 - test_gauss_3).array) < 0.01) @pytest.mark.parametrize("number", NUMS) def test_multiply_scalar(self, number): """ Check if multiplying a kernel with a scalar works correctly. """ gauss = Gaussian1DKernel(3) gauss_new = number * gauss assert_almost_equal(gauss_new.array, gauss.array * number, decimal=12) @pytest.mark.parametrize("number", NUMS) def test_multiply_scalar_type(self, number): """ Check if multiplying a kernel with a scalar works correctly. """ gauss = Gaussian1DKernel(3) gauss_new = number * gauss assert type(gauss_new) is Gaussian1DKernel @pytest.mark.parametrize("number", NUMS) def test_rmultiply_scalar_type(self, number): """ Check if multiplying a kernel with a scalar works correctly. """ gauss = Gaussian1DKernel(3) gauss_new = gauss * number assert type(gauss_new) is Gaussian1DKernel def test_multiply_kernel1d(self): """Test that multiplying two 1D kernels raises an exception.""" gauss = Gaussian1DKernel(3) with pytest.raises(Exception): gauss * gauss def test_multiply_kernel2d(self): """Test that multiplying two 2D kernels raises an exception.""" gauss = Gaussian2DKernel(3) with pytest.raises(Exception): gauss * gauss def test_multiply_kernel1d_kernel2d(self): """ Test that multiplying a 1D kernel with a 2D kernel raises an exception. """ with pytest.raises(Exception): Gaussian1DKernel(3) * Gaussian2DKernel(3) def test_add_kernel_scalar(self): """Test that adding a scalar to a kernel raises an exception.""" with pytest.raises(Exception): Gaussian1DKernel(3) + 1 def test_model_1D_kernel(self): """ Check Model1DKernel against Gaussian1Dkernel """ stddev = 5.0 gauss = Gaussian1D(1.0 / np.sqrt(2 * np.pi * stddev**2), 0, stddev) model_gauss_kernel = Model1DKernel(gauss, x_size=21) model_gauss_kernel.normalize() gauss_kernel = Gaussian1DKernel(stddev, x_size=21) assert_almost_equal(model_gauss_kernel.array, gauss_kernel.array, decimal=12) def test_model_2D_kernel(self): """ Check Model2DKernel against Gaussian2Dkernel """ stddev = 5.0 gauss = Gaussian2D(1.0 / (2 * np.pi * stddev**2), 0, 0, stddev, stddev) model_gauss_kernel = Model2DKernel(gauss, x_size=21) model_gauss_kernel.normalize() gauss_kernel = Gaussian2DKernel(stddev, x_size=21) assert_almost_equal(model_gauss_kernel.array, gauss_kernel.array, decimal=12) def test_custom_1D_kernel(self): """ Check CustomKernel against Box1DKernel. """ # Define one dimensional array: array = np.ones(5) custom = CustomKernel(array) custom.normalize() box = Box1DKernel(5) c2 = convolve(delta_pulse_1D, custom, boundary="fill") c1 = convolve(delta_pulse_1D, box, boundary="fill") assert_almost_equal(c1, c2, decimal=12) def test_custom_2D_kernel(self): """ Check CustomKernel against Box2DKernel. """ # Define one dimensional array: array = np.ones((5, 5)) custom = CustomKernel(array) custom.normalize() box = Box2DKernel(5) c2 = convolve(delta_pulse_2D, custom, boundary="fill") c1 = convolve(delta_pulse_2D, box, boundary="fill") assert_almost_equal(c1, c2, decimal=12) def test_custom_1D_kernel_list(self): """ Check if CustomKernel works with lists. """ custom = CustomKernel([1, 1, 1, 1, 1]) assert custom.is_bool is True def test_custom_2D_kernel_list(self): """ Check if CustomKernel works with lists. 
""" custom = CustomKernel([[1, 1, 1], [1, 1, 1], [1, 1, 1]]) assert custom.is_bool is True def test_custom_1D_kernel_zerosum(self): """ Check if CustomKernel works when the input array/list sums to zero. """ array = [-2, -1, 0, 1, 2] custom = CustomKernel(array) with pytest.warns( AstropyUserWarning, match=r"kernel cannot be " r"normalized because it sums to zero", ): custom.normalize() assert custom.truncation == 1.0 assert custom._kernel_sum == 0.0 def test_custom_2D_kernel_zerosum(self): """ Check if CustomKernel works when the input array/list sums to zero. """ array = [[0, -1, 0], [-1, 4, -1], [0, -1, 0]] custom = CustomKernel(array) with pytest.warns( AstropyUserWarning, match=r"kernel cannot be " r"normalized because it sums to zero", ): custom.normalize() assert custom.truncation == 1.0 assert custom._kernel_sum == 0.0 def test_custom_kernel_odd_error(self): """ Check if CustomKernel raises if the array size is odd. """ with pytest.raises(KernelSizeError): CustomKernel([1, 1, 1, 1]) def test_add_1D_kernels(self): """ Check if adding of two 1D kernels works. """ box_1 = Box1DKernel(5) box_2 = Box1DKernel(3) box_3 = Box1DKernel(1) box_sum_1 = box_1 + box_2 + box_3 box_sum_2 = box_2 + box_3 + box_1 box_sum_3 = box_3 + box_1 + box_2 ref = [ 1 / 5.0, 1 / 5.0 + 1 / 3.0, 1 + 1 / 3.0 + 1 / 5.0, 1 / 5.0 + 1 / 3.0, 1 / 5.0, ] assert_almost_equal(box_sum_1.array, ref, decimal=12) assert_almost_equal(box_sum_2.array, ref, decimal=12) assert_almost_equal(box_sum_3.array, ref, decimal=12) # Assert that the kernels haven't changed assert_almost_equal(box_1.array, [0.2, 0.2, 0.2, 0.2, 0.2], decimal=12) assert_almost_equal(box_2.array, [1 / 3.0, 1 / 3.0, 1 / 3.0], decimal=12) assert_almost_equal(box_3.array, [1], decimal=12) def test_add_2D_kernels(self): """ Check if adding of two 1D kernels works. """ box_1 = Box2DKernel(3) box_2 = Box2DKernel(1) box_sum_1 = box_1 + box_2 box_sum_2 = box_2 + box_1 ref = [ [1 / 9.0, 1 / 9.0, 1 / 9.0], [1 / 9.0, 1 + 1 / 9.0, 1 / 9.0], [1 / 9.0, 1 / 9.0, 1 / 9.0], ] ref_1 = [ [1 / 9.0, 1 / 9.0, 1 / 9.0], [1 / 9.0, 1 / 9.0, 1 / 9.0], [1 / 9.0, 1 / 9.0, 1 / 9.0], ] assert_almost_equal(box_2.array, [[1]], decimal=12) assert_almost_equal(box_1.array, ref_1, decimal=12) assert_almost_equal(box_sum_1.array, ref, decimal=12) assert_almost_equal(box_sum_2.array, ref, decimal=12) def test_Gaussian1DKernel_even_size(self): """ Check if even size for GaussianKernel works. """ gauss = Gaussian1DKernel(3, x_size=10) assert gauss.array.size == 10 def test_Gaussian2DKernel_even_size(self): """ Check if even size for GaussianKernel works. """ gauss = Gaussian2DKernel(3, x_size=10, y_size=10) assert gauss.array.shape == (10, 10) # https://github.com/astropy/astropy/issues/3605 def test_Gaussian2DKernel_rotated(self): gauss = Gaussian2DKernel( x_stddev=3, y_stddev=1.5, theta=0.7853981633974483, x_size=5, y_size=5 ) # rotated 45 deg ccw ans = [ [0.04087193, 0.04442386, 0.03657381, 0.02280797, 0.01077372], [0.04442386, 0.05704137, 0.05547869, 0.04087193, 0.02280797], [0.03657381, 0.05547869, 0.06374482, 0.05547869, 0.03657381], [0.02280797, 0.04087193, 0.05547869, 0.05704137, 0.04442386], [0.01077372, 0.02280797, 0.03657381, 0.04442386, 0.04087193], ] assert_allclose(gauss, ans, rtol=0.001) # Rough comparison at 0.1 % def test_normalize_peak(self): """ Check if normalize works with peak mode. """ custom = CustomKernel([1, 2, 3, 2, 1]) custom.normalize(mode="peak") assert custom.array.max() == 1 def test_check_kernel_attributes(self): """ Check if kernel attributes are correct. 
""" box = Box2DKernel(5) # Check truncation assert box.truncation == 0 # Check model assert isinstance(box.model, Box2D) # Check center assert box.center == [2, 2] # Check normalization box.normalize() assert_almost_equal(box._kernel_sum, 1.0, decimal=12) # Check separability assert box.separable @pytest.mark.parametrize( ("kernel_type", "mode"), list(itertools.product(KERNEL_TYPES, MODES)) ) def test_discretize_modes(self, kernel_type, mode): """ Check if the different modes result in kernels that work with convolve. Use only small kernel width, to make the test pass quickly. """ if kernel_type == AiryDisk2DKernel and not HAS_SCIPY: pytest.skip("Omitting AiryDisk2DKernel, which requires SciPy") if not kernel_type == Ring2DKernel: kernel = kernel_type(3) else: kernel = kernel_type(3, 3 * 0.2) if kernel.dimension == 1: c1 = convolve_fft( delta_pulse_1D, kernel, boundary="fill", normalize_kernel=False ) c2 = convolve( delta_pulse_1D, kernel, boundary="fill", normalize_kernel=False ) assert_almost_equal(c1, c2, decimal=12) else: c1 = convolve_fft( delta_pulse_2D, kernel, boundary="fill", normalize_kernel=False ) c2 = convolve( delta_pulse_2D, kernel, boundary="fill", normalize_kernel=False ) assert_almost_equal(c1, c2, decimal=12) @pytest.mark.parametrize("width", WIDTHS_EVEN) def test_box_kernels_even_size(self, width): """ Check if BoxKernel work properly with even sizes. """ kernel_1D = Box1DKernel(width) assert kernel_1D.shape[0] % 2 != 0 assert kernel_1D.array.sum() == 1.0 kernel_2D = Box2DKernel(width) assert np.all([_ % 2 != 0 for _ in kernel_2D.shape]) assert kernel_2D.array.sum() == 1.0 def test_kernel_normalization(self): """ Test that repeated normalizations do not change the kernel [#3747]. """ kernel = CustomKernel(np.ones(5)) kernel.normalize() data = np.copy(kernel.array) kernel.normalize() assert_allclose(data, kernel.array) kernel.normalize() assert_allclose(data, kernel.array) def test_kernel_normalization_mode(self): """ Test that an error is raised if mode is invalid. """ with pytest.raises(ValueError): kernel = CustomKernel(np.ones(3)) kernel.normalize(mode="invalid") def test_kernel1d_int_size(self): """ Test that an error is raised if ``Kernel1D`` ``x_size`` is not an integer. """ with pytest.raises(TypeError): Gaussian1DKernel(3, x_size=1.2) def test_kernel2d_int_xsize(self): """ Test that an error is raised if ``Kernel2D`` ``x_size`` is not an integer. """ with pytest.raises(TypeError): Gaussian2DKernel(3, x_size=1.2) def test_kernel2d_int_ysize(self): """ Test that an error is raised if ``Kernel2D`` ``y_size`` is not an integer. """ with pytest.raises(TypeError): Gaussian2DKernel(3, x_size=5, y_size=1.2) def test_kernel1d_initialization(self): """ Test that an error is raised if an array or model is not specified for ``Kernel1D``. """ with pytest.raises(TypeError): Kernel1D() def test_kernel2d_initialization(self): """ Test that an error is raised if an array or model is not specified for ``Kernel2D``. 
""" with pytest.raises(TypeError): Kernel2D() def test_array_keyword_not_allowed(self): """ Regression test for issue #10439 """ x = np.ones([10, 10]) with pytest.raises(TypeError, match=r".* allowed .*"): AiryDisk2DKernel(2, array=x) Box1DKernel(2, array=x) Box2DKernel(2, array=x) Gaussian1DKernel(2, array=x) Gaussian2DKernel(2, array=x) RickerWavelet1DKernel(2, array=x) RickerWavelet2DKernel(2, array=x) Model1DKernel(Gaussian1D(1, 0, 2), array=x) Model2DKernel(Gaussian2D(1, 0, 0, 2, 2), array=x) Ring2DKernel(9, 8, array=x) Tophat2DKernel(2, array=x) Trapezoid1DKernel(2, array=x) Trapezoid1DKernel(2, array=x)
c00a46561acac6f1060dbdbfa34045d0f69898dfa1256d9029907babfda1c081
# Licensed under a 3-clause BSD style license - see LICENSE.rst import itertools import numpy as np import pytest from numpy.testing import assert_allclose from astropy.convolution.utils import discretize_model from astropy.modeling.functional_models import ( Box1D, Box2D, Gaussian1D, Gaussian2D, RickerWavelet1D, RickerWavelet2D, ) from astropy.modeling.tests.example_models import models_1D, models_2D from astropy.modeling.tests.test_models import create_model from astropy.utils.compat.optional_deps import HAS_SCIPY modes = ["center", "linear_interp", "oversample"] test_models_1D = [Gaussian1D, Box1D, RickerWavelet1D] test_models_2D = [Gaussian2D, Box2D, RickerWavelet2D] @pytest.mark.parametrize( ("model_class", "mode"), list(itertools.product(test_models_1D, modes)) ) def test_pixel_sum_1D(model_class, mode): """ Test if the sum of all pixels corresponds nearly to the integral. """ if model_class == Box1D and mode == "center": pytest.skip("Non integrating mode. Skip integral test.") parameters = models_1D[model_class] model = create_model(model_class, parameters) values = discretize_model(model, models_1D[model_class]["x_lim"], mode=mode) assert_allclose(values.sum(), models_1D[model_class]["integral"], atol=0.0001) @pytest.mark.parametrize("mode", modes) def test_gaussian_eval_1D(mode): """ Discretize Gaussian with different modes and check if result is at least similar to Gaussian1D.eval(). """ model = Gaussian1D(1, 0, 20) x = np.arange(-100, 101) values = model(x) disc_values = discretize_model(model, (-100, 101), mode=mode) assert_allclose(values, disc_values, atol=0.001) @pytest.mark.parametrize( ("model_class", "mode"), list(itertools.product(test_models_2D, modes)) ) def test_pixel_sum_2D(model_class, mode): """ Test if the sum of all pixels corresponds nearly to the integral. """ if model_class == Box2D and mode == "center": pytest.skip("Non integrating mode. Skip integral test.") parameters = models_2D[model_class] model = create_model(model_class, parameters) values = discretize_model( model, models_2D[model_class]["x_lim"], models_2D[model_class]["y_lim"], mode=mode, ) assert_allclose(values.sum(), models_2D[model_class]["integral"], atol=0.0001) @pytest.mark.parametrize( ("model_class", "mode"), list(itertools.product(test_models_2D, modes)) ) def test_pixel_sum_compound_2D(model_class, mode): """ Test if the sum of all pixels of a compound model corresponds nearly to the integral. """ if model_class == Box2D and mode == "center": pytest.skip("Non integrating mode. 
Skip integral test.") parameters = models_2D[model_class] model = create_model(model_class, parameters) values = discretize_model( model + model, models_2D[model_class]["x_lim"], models_2D[model_class]["y_lim"], mode=mode, ) model_integral = 2 * models_2D[model_class]["integral"] assert_allclose(values.sum(), model_integral, atol=0.0001) @pytest.mark.parametrize("mode", modes) def test_gaussian_eval_2D(mode): """ Discretize Gaussian with different modes and check if result is at least similar to Gaussian2D.eval() """ model = Gaussian2D(0.01, 0, 0, 1, 1) x = np.arange(-2, 3) y = np.arange(-2, 3) x, y = np.meshgrid(x, y) values = model(x, y) disc_values = discretize_model(model, (-2, 3), (-2, 3), mode=mode) assert_allclose(values, disc_values, atol=1e-2) @pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy") @pytest.mark.slow def test_gaussian_eval_2D_integrate_mode(): """ Discretize Gaussian with integrate mode """ model_list = [ Gaussian2D(0.01, 0, 0, 2, 2), Gaussian2D(0.01, 0, 0, 1, 2), Gaussian2D(0.01, 0, 0, 2, 1), ] x = np.arange(-2, 3) y = np.arange(-2, 3) x, y = np.meshgrid(x, y) for model in model_list: values = model(x, y) disc_values = discretize_model(model, (-2, 3), (-2, 3), mode="integrate") assert_allclose(values, disc_values, atol=1e-2) @pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy") def test_subpixel_gauss_1D(): """ Test subpixel accuracy of the integrate mode with gaussian 1D model. """ gauss_1D = Gaussian1D(1, 0, 0.1) values = discretize_model(gauss_1D, (-1, 2), mode="integrate", factor=100) assert_allclose(values.sum(), np.sqrt(2 * np.pi) * 0.1, atol=0.00001) @pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy") def test_subpixel_gauss_2D(): """ Test subpixel accuracy of the integrate mode with gaussian 2D model. """ gauss_2D = Gaussian2D(1, 0, 0, 0.1, 0.1) values = discretize_model(gauss_2D, (-1, 2), (-1, 2), mode="integrate", factor=100) assert_allclose(values.sum(), 2 * np.pi * 0.01, atol=0.00001) def test_discretize_callable_1d(): """ Test discretize when a 1d function is passed. """ def f(x): return x**2 y = discretize_model(f, (-5, 6)) assert_allclose(y, np.arange(-5, 6) ** 2) def test_discretize_callable_2d(): """ Test discretize when a 2d function is passed. """ def f(x, y): return x**2 + y**2 actual = discretize_model(f, (-5, 6), (-5, 6)) y, x = np.indices((11, 11)) - 5 desired = x**2 + y**2 assert_allclose(actual, desired) def test_type_exception(): """ Test type exception. """ with pytest.raises(TypeError, match=r"Model must be callable\."): discretize_model(float(0), (-10, 11)) def test_dim_exception_1d(): """ Test dimension exception 1d. """ def f(x): return x**2 with pytest.raises(ValueError, match=r"y range specified, but model is only 1-d\."): discretize_model(f, (-10, 11), (-10, 11)) def test_dim_exception_2d(): """ Test dimension exception 2d. """ def f(x, y): return x**2 + y**2 with pytest.raises(ValueError, match=r"y range not specified, but model is 2-d"): discretize_model(f, (-10, 11)) def test_float_x_range_exception(): def f(x, y): return x**2 + y**2 with pytest.raises( ValueError, match=( r"The difference between the upper and lower limit of 'x_range' must be a" r" whole number\." ), ): discretize_model(f, (-10.002, 11.23)) def test_float_y_range_exception(): def f(x, y): return x**2 + y**2 with pytest.raises( ValueError, match=( r"The difference between the upper and lower limit of 'y_range' must be a" r" whole number\." 
), ): discretize_model(f, (-10, 11), (-10.002, 11.23)) def test_discretize_oversample(): gauss_2D = Gaussian2D( amplitude=1.0, x_mean=5.0, y_mean=125.0, x_stddev=0.75, y_stddev=3 ) values = discretize_model( gauss_2D, x_range=[0, 10], y_range=[100, 135], mode="oversample", factor=10 ) vmax = np.max(values) vmax_yx = np.unravel_index(values.argmax(), values.shape) values_osf1 = discretize_model( gauss_2D, x_range=[0, 10], y_range=[100, 135], mode="oversample", factor=1 ) values_center = discretize_model( gauss_2D, x_range=[0, 10], y_range=[100, 135], mode="center" ) assert values.shape == (35, 10) assert_allclose(vmax, 0.927, atol=1e-3) assert vmax_yx == (25, 5) assert_allclose(values_center, values_osf1)
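

# Usage sketch (not a test): discretize_model evaluates a model on a pixel
# grid. mode="center" samples pixel centres, while mode="oversample" averages
# `factor` subsamples along each axis, which matters for sharply peaked models.
def _discretize_sketch():
    model = Gaussian1D(amplitude=1, mean=0, stddev=0.5)

    centred = discretize_model(model, (-5, 6), mode="center")
    oversampled = discretize_model(model, (-5, 6), mode="oversample", factor=10)

    # Both cover the 11 pixels from -5 to 5; the oversampled sum approximates
    # the analytic integral sqrt(2 * pi) * 0.5 more closely.
    assert centred.shape == oversampled.shape == (11,)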
c98ce772fa235454880ac8068f4bacb5ce8821e6d0277a9517b3d55806f92769
# Licensed under a 3-clause BSD style license - see LICENSE.rst import itertools from contextlib import nullcontext import numpy as np import numpy.ma as ma import pytest from numpy.testing import ( assert_allclose, assert_array_almost_equal, assert_array_almost_equal_nulp, ) from astropy import units as u from astropy.convolution.convolve import convolve, convolve_fft from astropy.convolution.kernels import Gaussian2DKernel from astropy.utils.compat.optional_deps import HAS_PANDAS, HAS_SCIPY from astropy.utils.exceptions import AstropyUserWarning VALID_DTYPES = (">f4", "<f4", ">f8", "<f8") VALID_DTYPE_MATRIX = list(itertools.product(VALID_DTYPES, VALID_DTYPES)) BOUNDARY_OPTIONS = [None, "fill", "wrap", "extend"] NANHANDLING_OPTIONS = ["interpolate", "fill"] NORMALIZE_OPTIONS = [True, False] PRESERVE_NAN_OPTIONS = [True, False] BOUNDARIES_AND_CONVOLUTIONS = list( zip(itertools.cycle((convolve,)), BOUNDARY_OPTIONS) ) + [(convolve_fft, "wrap"), (convolve_fft, "fill")] class TestConvolve1D: def test_list(self): """ Test that convolve works correctly when inputs are lists """ x = [1, 4, 5, 6, 5, 7, 8] y = [0.2, 0.6, 0.2] z = convolve(x, y, boundary=None) assert_array_almost_equal_nulp( z, np.array([0.0, 3.6, 5.0, 5.6, 5.6, 6.8, 0.0]), 10 ) def test_tuple(self): """ Test that convolve works correctly when inputs are tuples """ x = (1, 4, 5, 6, 5, 7, 8) y = (0.2, 0.6, 0.2) z = convolve(x, y, boundary=None) assert_array_almost_equal_nulp( z, np.array([0.0, 3.6, 5.0, 5.6, 5.6, 6.8, 0.0]), 10 ) @pytest.mark.parametrize( ("boundary", "nan_treatment", "normalize_kernel", "preserve_nan", "dtype"), itertools.product( BOUNDARY_OPTIONS, NANHANDLING_OPTIONS, NORMALIZE_OPTIONS, PRESERVE_NAN_OPTIONS, VALID_DTYPES, ), ) def test_quantity( self, boundary, nan_treatment, normalize_kernel, preserve_nan, dtype ): """ Test that convolve works correctly when input array is a Quantity """ x = np.array([1, 4, 5, 6, 5, 7, 8], dtype=dtype) * u.ph y = np.array([0.2, 0.6, 0.2], dtype=dtype) z = convolve( x, y, boundary=boundary, nan_treatment=nan_treatment, normalize_kernel=normalize_kernel, preserve_nan=preserve_nan, ) assert x.unit == z.unit @pytest.mark.parametrize( ("boundary", "nan_treatment", "normalize_kernel", "preserve_nan", "dtype"), itertools.product( BOUNDARY_OPTIONS, NANHANDLING_OPTIONS, NORMALIZE_OPTIONS, PRESERVE_NAN_OPTIONS, VALID_DTYPES, ), ) def test_input_unmodified( self, boundary, nan_treatment, normalize_kernel, preserve_nan, dtype ): """ Test that convolve works correctly when inputs are lists """ array = [1.0, 4.0, 5.0, 6.0, 5.0, 7.0, 8.0] kernel = [0.2, 0.6, 0.2] x = np.array(array, dtype=dtype) y = np.array(kernel, dtype=dtype) # Make pseudoimmutable x.flags.writeable = False y.flags.writeable = False convolve( x, y, boundary=boundary, nan_treatment=nan_treatment, normalize_kernel=normalize_kernel, preserve_nan=preserve_nan, ) assert np.all(np.array(array, dtype=dtype) == x) assert np.all(np.array(kernel, dtype=dtype) == y) @pytest.mark.parametrize( ("boundary", "nan_treatment", "normalize_kernel", "preserve_nan", "dtype"), itertools.product( BOUNDARY_OPTIONS, NANHANDLING_OPTIONS, NORMALIZE_OPTIONS, PRESERVE_NAN_OPTIONS, VALID_DTYPES, ), ) def test_input_unmodified_with_nan( self, boundary, nan_treatment, normalize_kernel, preserve_nan, dtype ): """ Test that convolve doesn't modify the input data """ array = [1.0, 4.0, 5.0, np.nan, 5.0, 7.0, 8.0] kernel = [0.2, 0.6, 0.2] x = np.array(array, dtype=dtype) y = np.array(kernel, dtype=dtype) # Make pseudoimmutable x.flags.writeable = False 
y.flags.writeable = False # make copies for post call comparison x_copy = x.copy() y_copy = y.copy() convolve( x, y, boundary=boundary, nan_treatment=nan_treatment, normalize_kernel=normalize_kernel, preserve_nan=preserve_nan, ) # ( NaN == NaN ) = False # Only compare non NaN values for canonical equivalence # and then check NaN explicitly with np.isnan() array_is_nan = np.isnan(array) kernel_is_nan = np.isnan(kernel) array_not_nan = ~array_is_nan kernel_not_nan = ~kernel_is_nan assert np.all(x_copy[array_not_nan] == x[array_not_nan]) assert np.all(y_copy[kernel_not_nan] == y[kernel_not_nan]) assert np.all(np.isnan(x[array_is_nan])) assert np.all(np.isnan(y[kernel_is_nan])) @pytest.mark.parametrize(("dtype_array", "dtype_kernel"), VALID_DTYPE_MATRIX) def test_dtype(self, dtype_array, dtype_kernel): """ Test that 32- and 64-bit floats are correctly handled """ x = np.array([1.0, 2.0, 3.0], dtype=dtype_array) y = np.array([0.0, 1.0, 0.0], dtype=dtype_kernel) z = convolve(x, y) assert x.dtype == z.dtype @pytest.mark.parametrize( ( "convfunc", "boundary", ), BOUNDARIES_AND_CONVOLUTIONS, ) def test_unity_1_none(self, boundary, convfunc): """ Test that a unit kernel with a single element returns the same array """ x = np.array([1.0, 2.0, 3.0], dtype=">f8") y = np.array([1.0], dtype=">f8") z = convfunc(x, y, boundary=boundary) np.testing.assert_allclose(z, x) @pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS) def test_unity_3(self, boundary): """ Test that a unit kernel with three elements returns the same array (except when boundary is None). """ x = np.array([1.0, 2.0, 3.0], dtype=">f8") y = np.array([0.0, 1.0, 0.0], dtype=">f8") z = convolve(x, y, boundary=boundary) if boundary is None: assert np.all(z == np.array([0.0, 2.0, 0.0], dtype=">f8")) else: assert np.all(z == x) @pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS) def test_uniform_3(self, boundary): """ Test that the different modes are producing the correct results using a uniform kernel with three elements """ x = np.array([1.0, 0.0, 3.0], dtype=">f8") y = np.array([1.0, 1.0, 1.0], dtype=">f8") z = convolve(x, y, boundary=boundary, normalize_kernel=False) if boundary is None: assert np.all(z == np.array([0.0, 4.0, 0.0], dtype=">f8")) elif boundary == "fill": assert np.all(z == np.array([1.0, 4.0, 3.0], dtype=">f8")) elif boundary == "wrap": assert np.all(z == np.array([4.0, 4.0, 4.0], dtype=">f8")) else: assert np.all(z == np.array([2.0, 4.0, 6.0], dtype=">f8")) @pytest.mark.parametrize( ("boundary", "nan_treatment", "normalize_kernel", "preserve_nan"), itertools.product( BOUNDARY_OPTIONS, NANHANDLING_OPTIONS, NORMALIZE_OPTIONS, PRESERVE_NAN_OPTIONS, ), ) def test_unity_3_withnan( self, boundary, nan_treatment, normalize_kernel, preserve_nan ): """ Test that a unit kernel with three elements returns the same array (except when boundary is None). This version includes a NaN value in the original array. """ x = np.array([1.0, np.nan, 3.0], dtype=">f8") y = np.array([0.0, 1.0, 0.0], dtype=">f8") # Since the kernel is actually only one pixel wide (because of the # zeros) the NaN value doesn't get interpolated over so a warning is # expected. 
if nan_treatment == "interpolate" and not preserve_nan: ctx = pytest.warns( AstropyUserWarning, match="nan_treatment='interpolate', however, NaN values detected", ) else: ctx = nullcontext() with ctx: z = convolve( x, y, boundary=boundary, nan_treatment=nan_treatment, normalize_kernel=normalize_kernel, preserve_nan=preserve_nan, ) if preserve_nan: assert np.isnan(z[1]) x = np.nan_to_num(z) z = np.nan_to_num(z) if boundary is None: assert np.all(z == np.array([0.0, 0.0, 0.0], dtype=">f8")) else: assert np.all(z == x) @pytest.mark.parametrize( ("boundary", "nan_treatment", "normalize_kernel", "preserve_nan"), itertools.product( BOUNDARY_OPTIONS, NANHANDLING_OPTIONS, NORMALIZE_OPTIONS, PRESERVE_NAN_OPTIONS, ), ) def test_uniform_3_withnan( self, boundary, nan_treatment, normalize_kernel, preserve_nan ): """ Test that the different modes are producing the correct results using a uniform kernel with three elements. This version includes a NaN value in the original array. """ x = np.array([1.0, np.nan, 3.0], dtype=">f8") y = np.array([1.0, 1.0, 1.0], dtype=">f8") z = convolve( x, y, boundary=boundary, nan_treatment=nan_treatment, normalize_kernel=normalize_kernel, preserve_nan=preserve_nan, ) if preserve_nan: assert np.isnan(z[1]) z = np.nan_to_num(z) # boundary, nan_treatment, normalize_kernel rslt = { (None, "interpolate", True): [0, 2, 0], (None, "interpolate", False): [0, 6, 0], (None, "fill", True): [0, 4 / 3.0, 0], (None, "fill", False): [0, 4, 0], ("fill", "interpolate", True): [1 / 2.0, 2, 3 / 2.0], ("fill", "interpolate", False): [3 / 2.0, 6, 9 / 2.0], ("fill", "fill", True): [1 / 3.0, 4 / 3.0, 3 / 3.0], ("fill", "fill", False): [1, 4, 3], ("wrap", "interpolate", True): [2, 2, 2], ("wrap", "interpolate", False): [6, 6, 6], ("wrap", "fill", True): [4 / 3.0, 4 / 3.0, 4 / 3.0], ("wrap", "fill", False): [4, 4, 4], ("extend", "interpolate", True): [1, 2, 3], ("extend", "interpolate", False): [3, 6, 9], ("extend", "fill", True): [2 / 3.0, 4 / 3.0, 6 / 3.0], ("extend", "fill", False): [2, 4, 6], }[boundary, nan_treatment, normalize_kernel] if preserve_nan: rslt[1] = 0 assert_array_almost_equal_nulp(z, np.array(rslt, dtype=">f8"), 10) @pytest.mark.parametrize( ("boundary", "normalize_kernel"), itertools.product(BOUNDARY_OPTIONS, NORMALIZE_OPTIONS), ) def test_zero_sum_kernel(self, boundary, normalize_kernel): """ Test that convolve works correctly with zero sum kernels. """ if normalize_kernel: pytest.xfail("You can't normalize by a zero sum kernel") x = [1, 2, 3, 4, 5, 6, 7, 8, 9] y = [-1, -1, -1, -1, 8, -1, -1, -1, -1] assert np.isclose(sum(y), 0, atol=1e-8) z = convolve(x, y, boundary=boundary, normalize_kernel=normalize_kernel) # boundary, normalize_kernel == False rslt = { (None): [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "fill": [-6.0, -3.0, -1.0, 0.0, 0.0, 10.0, 21.0, 33.0, 46.0], "wrap": [-36.0, -27.0, -18.0, -9.0, 0.0, 9.0, 18.0, 27.0, 36.0], "extend": [-10.0, -6.0, -3.0, -1.0, 0.0, 1.0, 3.0, 6.0, 10.0], }[boundary] assert_array_almost_equal_nulp(z, np.array(rslt, dtype=">f8"), 10) @pytest.mark.parametrize( ("boundary", "normalize_kernel"), itertools.product(BOUNDARY_OPTIONS, NORMALIZE_OPTIONS), ) def test_int_masked_kernel(self, boundary, normalize_kernel): """ Test that convolve works correctly with integer masked kernels. 
""" if normalize_kernel: pytest.xfail("You can't normalize by a zero sum kernel") x = [1, 2, 3, 4, 5, 6, 7, 8, 9] y = ma.array( [-1, -1, -1, -1, 8, -1, -1, -1, -1], mask=[1, 0, 0, 0, 0, 0, 0, 0, 0], fill_value=0.0, ) z = convolve(x, y, boundary=boundary, normalize_kernel=normalize_kernel) # boundary, normalize_kernel == False rslt = { (None): [0.0, 0.0, 0.0, 0.0, 9.0, 0.0, 0.0, 0.0, 0.0], "fill": [-1.0, 3.0, 6.0, 8.0, 9.0, 10.0, 21.0, 33.0, 46.0], "wrap": [-31.0, -21.0, -11.0, -1.0, 9.0, 10.0, 20.0, 30.0, 40.0], "extend": [-5.0, 0.0, 4.0, 7.0, 9.0, 10.0, 12.0, 15.0, 19.0], }[boundary] assert_array_almost_equal_nulp(z, np.array(rslt, dtype=">f8"), 10) @pytest.mark.parametrize("preserve_nan", PRESERVE_NAN_OPTIONS) def test_int_masked_array(self, preserve_nan): """ Test that convolve works correctly with integer masked arrays. """ x = ma.array([3, 5, 7, 11, 13], mask=[0, 0, 1, 0, 0], fill_value=0.0) y = np.array([1.0, 1.0, 1.0], dtype=">f8") z = convolve(x, y, preserve_nan=preserve_nan) if preserve_nan: assert np.isnan(z[2]) z[2] = 8 assert_array_almost_equal_nulp(z, (8 / 3.0, 4, 8, 12, 8), 10) class TestConvolve2D: def test_list(self): """ Test that convolve works correctly when inputs are lists """ x = [[1, 1, 1], [1, 1, 1], [1, 1, 1]] z = convolve(x, x, boundary="fill", fill_value=1, normalize_kernel=True) assert_array_almost_equal_nulp(z, x, 10) z = convolve(x, x, boundary="fill", fill_value=1, normalize_kernel=False) assert_array_almost_equal_nulp(z, np.array(x, float) * 9, 10) @pytest.mark.parametrize(("dtype_array", "dtype_kernel"), VALID_DTYPE_MATRIX) def test_dtype(self, dtype_array, dtype_kernel): """ Test that 32- and 64-bit floats are correctly handled """ x = np.array( [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], dtype=dtype_array ) y = np.array( [[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]], dtype=dtype_kernel ) z = convolve(x, y) assert x.dtype == z.dtype @pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS) def test_unity_1x1_none(self, boundary): """ Test that a 1x1 unit kernel returns the same array """ x = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], dtype=">f8") y = np.array([[1.0]], dtype=">f8") z = convolve(x, y, boundary=boundary) assert np.all(z == x) @pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS) def test_unity_3x3(self, boundary): """ Test that a 3x3 unit kernel returns the same array (except when boundary is None). """ x = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], dtype=">f8") y = np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]], dtype=">f8") z = convolve(x, y, boundary=boundary) if boundary is None: assert np.all( z == np.array( [[0.0, 0.0, 0.0], [0.0, 5.0, 0.0], [0.0, 0.0, 0.0]], dtype=">f8" ) ) else: assert np.all(z == x) @pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS) def test_uniform_3x3(self, boundary): """ Test that the different modes are producing the correct results using a 3x3 uniform kernel. 
""" x = np.array([[0.0, 0.0, 3.0], [1.0, 0.0, 0.0], [0.0, 2.0, 0.0]], dtype=">f8") y = np.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], dtype=">f8") z = convolve(x, y, boundary=boundary, normalize_kernel=False) if boundary is None: assert_array_almost_equal_nulp( z, np.array( [[0.0, 0.0, 0.0], [0.0, 6.0, 0.0], [0.0, 0.0, 0.0]], dtype=">f8" ), 10, ) elif boundary == "fill": assert_array_almost_equal_nulp( z, np.array( [[1.0, 4.0, 3.0], [3.0, 6.0, 5.0], [3.0, 3.0, 2.0]], dtype=">f8" ), 10, ) elif boundary == "wrap": assert_array_almost_equal_nulp( z, np.array( [[6.0, 6.0, 6.0], [6.0, 6.0, 6.0], [6.0, 6.0, 6.0]], dtype=">f8" ), 10, ) else: assert_array_almost_equal_nulp( z, np.array( [[2.0, 7.0, 12.0], [4.0, 6.0, 8.0], [6.0, 5.0, 4.0]], dtype=">f8" ), 10, ) @pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS) def test_unity_3x3_withnan(self, boundary): """ Test that a 3x3 unit kernel returns the same array (except when boundary is None). This version includes a NaN value in the original array. """ x = np.array( [[1.0, 2.0, 3.0], [4.0, np.nan, 6.0], [7.0, 8.0, 9.0]], dtype=">f8" ) y = np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]], dtype=">f8") z = convolve(x, y, boundary=boundary, nan_treatment="fill", preserve_nan=True) assert np.isnan(z[1, 1]) x = np.nan_to_num(z) z = np.nan_to_num(z) if boundary is None: assert np.all( z == np.array( [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], dtype=">f8" ) ) else: assert np.all(z == x) @pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS) def test_uniform_3x3_withnanfilled(self, boundary): """ Test that the different modes are producing the correct results using a 3x3 uniform kernel. This version includes a NaN value in the original array. """ x = np.array( [[0.0, 0.0, 4.0], [1.0, np.nan, 0.0], [0.0, 3.0, 0.0]], dtype=">f8" ) y = np.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], dtype=">f8") z = convolve( x, y, boundary=boundary, nan_treatment="fill", normalize_kernel=False ) if boundary is None: assert_array_almost_equal_nulp( z, np.array( [[0.0, 0.0, 0.0], [0.0, 8.0, 0.0], [0.0, 0.0, 0.0]], dtype=">f8" ), 10, ) elif boundary == "fill": assert_array_almost_equal_nulp( z, np.array( [[1.0, 5.0, 4.0], [4.0, 8.0, 7.0], [4.0, 4.0, 3.0]], dtype=">f8" ), 10, ) elif boundary == "wrap": assert_array_almost_equal_nulp( z, np.array( [[8.0, 8.0, 8.0], [8.0, 8.0, 8.0], [8.0, 8.0, 8.0]], dtype=">f8" ), 10, ) elif boundary == "extend": assert_array_almost_equal_nulp( z, np.array( [[2.0, 9.0, 16.0], [5.0, 8.0, 11.0], [8.0, 7.0, 6.0]], dtype=">f8" ), 10, ) else: raise ValueError("Invalid boundary specification") @pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS) def test_uniform_3x3_withnaninterped(self, boundary): """ Test that the different modes are producing the correct results using a 3x3 uniform kernel. This version includes a NaN value in the original array. 
""" x = np.array( [[0.0, 0.0, 4.0], [1.0, np.nan, 0.0], [0.0, 3.0, 0.0]], dtype=">f8" ) y = np.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], dtype=">f8") z = convolve( x, y, boundary=boundary, nan_treatment="interpolate", normalize_kernel=True ) if boundary is None: assert_array_almost_equal_nulp( z, np.array( [[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]], dtype=">f8" ), 10, ) elif boundary == "fill": assert_array_almost_equal_nulp( z, np.array( [ [1.0 / 8, 5.0 / 8, 4.0 / 8], [4.0 / 8, 8.0 / 8, 7.0 / 8], [4.0 / 8, 4.0 / 8, 3.0 / 8], ], dtype=">f8", ), 10, ) elif boundary == "wrap": assert_array_almost_equal_nulp( z, np.array( [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], dtype=">f8" ), 10, ) elif boundary == "extend": assert_array_almost_equal_nulp( z, np.array( [ [2.0 / 8, 9.0 / 8, 16.0 / 8], [5.0 / 8, 8.0 / 8, 11.0 / 8], [8.0 / 8, 7.0 / 8, 6.0 / 8], ], dtype=">f8", ), 10, ) else: raise ValueError("Invalid boundary specification") @pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS) def test_non_normalized_kernel_2D(self, boundary): x = np.array([[0.0, 0.0, 4.0], [1.0, 2.0, 0.0], [0.0, 3.0, 0.0]], dtype="float") y = np.array( [[1.0, -1.0, 1.0], [-1.0, 0.0, -1.0], [1.0, -1.0, 1.0]], dtype="float" ) z = convolve( x, y, boundary=boundary, nan_treatment="fill", normalize_kernel=False ) if boundary is None: assert_array_almost_equal_nulp( z, np.array( [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], dtype="float" ), 10, ) elif boundary == "fill": assert_array_almost_equal_nulp( z, np.array( [[1.0, -5.0, 2.0], [1.0, 0.0, -3.0], [-2.0, -1.0, -1.0]], dtype="float", ), 10, ) elif boundary == "wrap": assert_array_almost_equal_nulp( z, np.array( [[0.0, -8.0, 6.0], [5.0, 0.0, -4.0], [2.0, 3.0, -4.0]], dtype="float", ), 10, ) elif boundary == "extend": assert_array_almost_equal_nulp( z, np.array( [[2.0, -1.0, -2.0], [0.0, 0.0, 1.0], [2.0, -4.0, 2.0]], dtype="float", ), 10, ) else: raise ValueError("Invalid boundary specification") class TestConvolve3D: def test_list(self): """ Test that convolve works correctly when inputs are lists """ x = [ [[1, 1, 1], [1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1]], ] z = convolve(x, x, boundary="fill", fill_value=1, normalize_kernel=False) assert_array_almost_equal_nulp(z / 27, x, 10) @pytest.mark.parametrize(("dtype_array", "dtype_kernel"), VALID_DTYPE_MATRIX) def test_dtype(self, dtype_array, dtype_kernel): """ Test that 32- and 64-bit floats are correctly handled """ x = np.array( [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], dtype=dtype_array ) y = np.array( [[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]], dtype=dtype_kernel ) z = convolve(x, y) assert x.dtype == z.dtype @pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS) def test_unity_1x1x1_none(self, boundary): """ Test that a 1x1x1 unit kernel returns the same array """ x = np.array( [ [[1.0, 2.0, 1.0], [2.0, 3.0, 1.0], [3.0, 2.0, 5.0]], [[4.0, 3.0, 1.0], [5.0, 0.0, 2.0], [6.0, 1.0, 1.0]], [[7.0, 0.0, 2.0], [8.0, 2.0, 3.0], [9.0, 2.0, 2.0]], ], dtype=">f8", ) y = np.array([[[1.0]]], dtype=">f8") z = convolve(x, y, boundary=boundary) assert np.all(z == x) @pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS) def test_unity_3x3x3(self, boundary): """ Test that a 3x3x3 unit kernel returns the same array (except when boundary is None). 
""" x = np.array( [ [[1.0, 2.0, 1.0], [2.0, 3.0, 1.0], [3.0, 2.0, 5.0]], [[4.0, 3.0, 1.0], [5.0, 3.0, 2.0], [6.0, 1.0, 1.0]], [[7.0, 0.0, 2.0], [8.0, 2.0, 3.0], [9.0, 2.0, 2.0]], ], dtype=">f8", ) y = np.zeros((3, 3, 3), dtype=">f8") y[1, 1, 1] = 1.0 z = convolve(x, y, boundary=boundary) if boundary is None: assert np.all( z == np.array( [ [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0], [0.0, 3.0, 0.0], [0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], ], dtype=">f8", ) ) else: assert np.all(z == x) @pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS) def test_uniform_3x3x3(self, boundary): """ Test that the different modes are producing the correct results using a 3x3 uniform kernel. """ x = np.array( [ [[1.0, 2.0, 1.0], [2.0, 3.0, 1.0], [3.0, 2.0, 5.0]], [[4.0, 3.0, 1.0], [5.0, 3.0, 2.0], [6.0, 1.0, 1.0]], [[7.0, 0.0, 2.0], [8.0, 2.0, 3.0], [9.0, 2.0, 2.0]], ], dtype=">f8", ) y = np.ones((3, 3, 3), dtype=">f8") z = convolve(x, y, boundary=boundary, normalize_kernel=False) if boundary is None: assert_array_almost_equal_nulp( z, np.array( [ [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0], [0.0, 81.0, 0.0], [0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], ], dtype=">f8", ), 10, ) elif boundary == "fill": assert_array_almost_equal_nulp( z, np.array( [ [[23.0, 28.0, 16.0], [35.0, 46.0, 25.0], [25.0, 34.0, 18.0]], [[40.0, 50.0, 23.0], [63.0, 81.0, 36.0], [46.0, 60.0, 27.0]], [[32.0, 40.0, 16.0], [50.0, 61.0, 22.0], [36.0, 44.0, 16.0]], ], dtype=">f8", ), 10, ) elif boundary == "wrap": assert_array_almost_equal_nulp( z, np.array( [ [[81.0, 81.0, 81.0], [81.0, 81.0, 81.0], [81.0, 81.0, 81.0]], [[81.0, 81.0, 81.0], [81.0, 81.0, 81.0], [81.0, 81.0, 81.0]], [[81.0, 81.0, 81.0], [81.0, 81.0, 81.0], [81.0, 81.0, 81.0]], ], dtype=">f8", ), 10, ) else: assert_array_almost_equal_nulp( z, np.array( [ [[65.0, 54.0, 43.0], [75.0, 66.0, 57.0], [85.0, 78.0, 71.0]], [[96.0, 71.0, 46.0], [108.0, 81.0, 54.0], [120.0, 91.0, 62.0]], [ [127.0, 88.0, 49.0], [141.0, 96.0, 51.0], [155.0, 104.0, 53.0], ], ], dtype=">f8", ), 10, ) @pytest.mark.parametrize( ("boundary", "nan_treatment"), itertools.product(BOUNDARY_OPTIONS, NANHANDLING_OPTIONS), ) def test_unity_3x3x3_withnan(self, boundary, nan_treatment): """ Test that a 3x3x3 unit kernel returns the same array (except when boundary is None). This version includes a NaN value in the original array. """ x = np.array( [ [[1.0, 2.0, 1.0], [2.0, 3.0, 1.0], [3.0, 2.0, 5.0]], [[4.0, 3.0, 1.0], [5.0, np.nan, 2.0], [6.0, 1.0, 1.0]], [[7.0, 0.0, 2.0], [8.0, 2.0, 3.0], [9.0, 2.0, 2.0]], ], dtype=">f8", ) y = np.zeros((3, 3, 3), dtype=">f8") y[1, 1, 1] = 1.0 z = convolve( x, y, boundary=boundary, nan_treatment=nan_treatment, preserve_nan=True ) assert np.isnan(z[1, 1, 1]) x = np.nan_to_num(z) z = np.nan_to_num(z) if boundary is None: assert np.all( z == np.array( [ [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], ], dtype=">f8", ) ) else: assert np.all(z == x) @pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS) def test_uniform_3x3x3_withnan_filled(self, boundary): """ Test that the different modes are producing the correct results using a 3x3 uniform kernel. This version includes a NaN value in the original array. 
""" x = np.array( [ [[1.0, 2.0, 1.0], [2.0, 3.0, 1.0], [3.0, 2.0, 5.0]], [[4.0, 3.0, 1.0], [5.0, np.nan, 2.0], [6.0, 1.0, 1.0]], [[7.0, 0.0, 2.0], [8.0, 2.0, 3.0], [9.0, 2.0, 2.0]], ], dtype=">f8", ) y = np.ones((3, 3, 3), dtype=">f8") z = convolve( x, y, boundary=boundary, nan_treatment="fill", normalize_kernel=False ) if boundary is None: assert_array_almost_equal_nulp( z, np.array( [ [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0], [0.0, 78.0, 0.0], [0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], ], dtype=">f8", ), 10, ) elif boundary == "fill": assert_array_almost_equal_nulp( z, np.array( [ [[20.0, 25.0, 13.0], [32.0, 43.0, 22.0], [22.0, 31.0, 15.0]], [[37.0, 47.0, 20.0], [60.0, 78.0, 33.0], [43.0, 57.0, 24.0]], [[29.0, 37.0, 13.0], [47.0, 58.0, 19.0], [33.0, 41.0, 13.0]], ], dtype=">f8", ), 10, ) elif boundary == "wrap": assert_array_almost_equal_nulp( z, np.array( [ [[78.0, 78.0, 78.0], [78.0, 78.0, 78.0], [78.0, 78.0, 78.0]], [[78.0, 78.0, 78.0], [78.0, 78.0, 78.0], [78.0, 78.0, 78.0]], [[78.0, 78.0, 78.0], [78.0, 78.0, 78.0], [78.0, 78.0, 78.0]], ], dtype=">f8", ), 10, ) elif boundary == "extend": assert_array_almost_equal_nulp( z, np.array( [ [[62.0, 51.0, 40.0], [72.0, 63.0, 54.0], [82.0, 75.0, 68.0]], [[93.0, 68.0, 43.0], [105.0, 78.0, 51.0], [117.0, 88.0, 59.0]], [ [124.0, 85.0, 46.0], [138.0, 93.0, 48.0], [152.0, 101.0, 50.0], ], ], dtype=">f8", ), 10, ) else: raise ValueError("Invalid Boundary Option") @pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS) def test_uniform_3x3x3_withnan_interped(self, boundary): """ Test that the different modes are producing the correct results using a 3x3 uniform kernel. This version includes a NaN value in the original array. """ x = np.array( [ [[1.0, 2.0, 1.0], [2.0, 3.0, 1.0], [3.0, 2.0, 5.0]], [[4.0, 3.0, 1.0], [5.0, np.nan, 2.0], [6.0, 1.0, 1.0]], [[7.0, 0.0, 2.0], [8.0, 2.0, 3.0], [9.0, 2.0, 2.0]], ], dtype=">f8", ) y = np.ones((3, 3, 3), dtype=">f8") z = convolve( x, y, boundary=boundary, nan_treatment="interpolate", normalize_kernel=True ) kernsum = y.sum() - 1 # one nan is missing mid = x[np.isfinite(x)].sum() / kernsum if boundary is None: assert_array_almost_equal_nulp( z, np.array( [ [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0], [0.0, 78.0, 0.0], [0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], ], dtype=">f8", ) / kernsum, 10, ) elif boundary == "fill": assert_array_almost_equal_nulp( z, np.array( [ [[20.0, 25.0, 13.0], [32.0, 43.0, 22.0], [22.0, 31.0, 15.0]], [[37.0, 47.0, 20.0], [60.0, 78.0, 33.0], [43.0, 57.0, 24.0]], [[29.0, 37.0, 13.0], [47.0, 58.0, 19.0], [33.0, 41.0, 13.0]], ], dtype=">f8", ) / kernsum, 10, ) elif boundary == "wrap": assert_array_almost_equal_nulp(z, np.tile(mid.astype(">f8"), [3, 3, 3]), 10) elif boundary == "extend": assert_array_almost_equal_nulp( z, np.array( [ [[62.0, 51.0, 40.0], [72.0, 63.0, 54.0], [82.0, 75.0, 68.0]], [[93.0, 68.0, 43.0], [105.0, 78.0, 51.0], [117.0, 88.0, 59.0]], [ [124.0, 85.0, 46.0], [138.0, 93.0, 48.0], [152.0, 101.0, 50.0], ], ], dtype=">f8", ) / kernsum, 10, ) else: raise ValueError("Invalid Boundary Option") @pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS) def test_asymmetric_kernel(boundary): """ Regression test for #6264: make sure that asymmetric convolution functions go the right direction """ x = np.array([3.0, 0.0, 1.0], dtype=">f8") y = np.array([1, 2, 3], dtype=">f8") z = convolve(x, y, boundary=boundary, normalize_kernel=False) if boundary == "fill": 
assert_array_almost_equal_nulp(z, np.array([6.0, 10.0, 2.0], dtype="float"), 10) elif boundary is None: assert_array_almost_equal_nulp(z, np.array([0.0, 10.0, 0.0], dtype="float"), 10) elif boundary == "extend": assert_array_almost_equal_nulp( z, np.array([15.0, 10.0, 3.0], dtype="float"), 10 ) elif boundary == "wrap": assert_array_almost_equal_nulp(z, np.array([9.0, 10.0, 5.0], dtype="float"), 10) @pytest.mark.parametrize("ndims", (1, 2, 3)) def test_convolution_consistency(ndims): np.random.seed(0) array = np.random.randn(*([3] * ndims)) np.random.seed(0) kernel = np.random.rand(*([3] * ndims)) conv_f = convolve_fft(array, kernel, boundary="fill") conv_d = convolve(array, kernel, boundary="fill") assert_array_almost_equal_nulp(conv_f, conv_d, 30) def test_astropy_convolution_against_numpy(): x = np.array([1, 2, 3]) y = np.array([5, 4, 3, 2, 1]) assert_array_almost_equal( np.convolve(y, x, "same"), convolve(y, x, normalize_kernel=False) ) assert_array_almost_equal( np.convolve(y, x, "same"), convolve_fft(y, x, normalize_kernel=False) ) @pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy") def test_astropy_convolution_against_scipy(): from scipy.signal import fftconvolve x = np.array([1, 2, 3]) y = np.array([5, 4, 3, 2, 1]) assert_array_almost_equal( fftconvolve(y, x, "same"), convolve(y, x, normalize_kernel=False) ) assert_array_almost_equal( fftconvolve(y, x, "same"), convolve_fft(y, x, normalize_kernel=False) ) @pytest.mark.skipif(not HAS_PANDAS, reason="Requires pandas") def test_regression_6099(): import pandas wave = np.array(np.linspace(5000, 5100, 10)) boxcar = 3 nonseries_result = convolve(wave, np.ones((boxcar,)) / boxcar) wave_series = pandas.Series(wave) series_result = convolve(wave_series, np.ones((boxcar,)) / boxcar) assert_array_almost_equal(nonseries_result, series_result) def test_invalid_array_convolve(): kernel = np.ones(3) / 3.0 with pytest.raises(TypeError): convolve("glork", kernel) @pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS) def test_non_square_kernel_asymmetric(boundary): # Regression test for a bug that occurred when using non-square kernels in # 2D when using boundary=None kernel = np.array([[1, 2, 3, 2, 1], [0, 1, 2, 1, 0], [0, 0, 0, 0, 0]]) image = np.zeros((13, 13)) image[6, 6] = 1 result = convolve(image, kernel, normalize_kernel=False, boundary=boundary) assert_allclose(result[5:8, 4:9], kernel) @pytest.mark.parametrize( ("boundary", "normalize_kernel"), itertools.product(BOUNDARY_OPTIONS, NORMALIZE_OPTIONS), ) def test_uninterpolated_nan_regions(boundary, normalize_kernel): # Issue #8086 # Test NaN interpolation of contiguous NaN regions with kernels of size # identical and greater than that of the region of NaN values. # Test case: kernel.shape == NaN_region.shape kernel = Gaussian2DKernel(1, 5, 5) nan_centroid = np.full(kernel.shape, np.nan) image = np.pad( nan_centroid, pad_width=kernel.shape[0] * 2, mode="constant", constant_values=1 ) with pytest.warns( AstropyUserWarning, match=r"nan_treatment='interpolate', however, NaN values detected " r"post convolution. A contiguous region of NaN values, larger " r"than the kernel size, are present in the input array. 
" r"Increase the kernel size to avoid this.", ): result = convolve( image, kernel, boundary=boundary, nan_treatment="interpolate", normalize_kernel=normalize_kernel, ) assert np.any(np.isnan(result)) # Test case: kernel.shape > NaN_region.shape nan_centroid = np.full( (kernel.shape[0] - 1, kernel.shape[1] - 1), np.nan ) # 1 smaller than kerenel image = np.pad( nan_centroid, pad_width=kernel.shape[0] * 2, mode="constant", constant_values=1 ) result = convolve( image, kernel, boundary=boundary, nan_treatment="interpolate", normalize_kernel=normalize_kernel, ) assert ~np.any(np.isnan(result)) # Note: negation def test_regressiontest_issue9168(): """ Issue #9168 pointed out that kernels can be (unitless) quantities, which leads to crashes when inplace modifications are made to arrays in convolve/convolve_fft, so we now strip the quantity aspects off of kernels. """ x = np.array( [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], ) kernel_fwhm = 1 * u.arcsec pixel_size = 1 * u.arcsec kernel = Gaussian2DKernel(x_stddev=kernel_fwhm / pixel_size) convolve_fft(x, kernel, boundary="fill", fill_value=np.nan, preserve_nan=True) convolve(x, kernel, boundary="fill", fill_value=np.nan, preserve_nan=True) def test_convolve_nan_zero_sum_kernel(): with pytest.raises( ValueError, match=( "Setting nan_treatment='interpolate' " "requires the kernel to be normalized, but the " "input kernel has a sum close to zero. For a " "zero-sum kernel and data with NaNs, set " "nan_treatment='fill'." ), ): convolve([1, np.nan, 3], [-1, 2, -1], normalize_kernel=False)
ba18d1d9a51c6f025d5a42c74d1da9b3e04c2356a9d96e637da04bcce84ace9b
# Licensed under a 3-clause BSD style license - see LICENSE.rst import math import numpy as np import pytest from numpy.testing import assert_allclose, assert_almost_equal from astropy.convolution.convolve import convolve, convolve_fft, convolve_models from astropy.modeling import fitting, models from astropy.utils.compat.optional_deps import HAS_SCIPY from astropy.utils.misc import NumpyRNGContext class TestConvolve1DModels: @pytest.mark.parametrize("mode", [convolve_fft, convolve]) @pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy") def test_is_consistency_with_astropy_convolution(self, mode): kernel = models.Gaussian1D(1, 0, 1) model = models.Gaussian1D(1, 0, 1) model_conv = convolve_models(model, kernel, mode=mode.__name__) x = np.arange(-5, 6) ans = mode(model(x), kernel(x)) assert_allclose(ans, model_conv(x), atol=1e-5) @pytest.mark.parametrize("mode", ["convolve_fft", "convolve"]) @pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy") def test_against_scipy(self, mode): from scipy.signal import fftconvolve kernel = models.Gaussian1D(1, 0, 1) model = models.Gaussian1D(1, 0, 1) model_conv = convolve_models(model, kernel, mode=mode) x = np.arange(-5, 6) ans = fftconvolve(kernel(x), model(x), mode="same") assert_allclose(ans, model_conv(x) * kernel(x).sum(), atol=1e-5) @pytest.mark.parametrize("mode", ["convolve_fft", "convolve"]) @pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy") def test_against_scipy_with_additional_keywords(self, mode): from scipy.signal import fftconvolve kernel = models.Gaussian1D(1, 0, 1) model = models.Gaussian1D(1, 0, 1) model_conv = convolve_models(model, kernel, mode=mode, normalize_kernel=False) x = np.arange(-5, 6) ans = fftconvolve(kernel(x), model(x), mode="same") assert_allclose(ans, model_conv(x), atol=1e-5) @pytest.mark.parametrize("mode", ["convolve_fft", "convolve"]) def test_sum_of_gaussians(self, mode): """ Test that convolving N(a, b) with N(c, d) gives N(a + c, b + d), where N(., .) stands for Gaussian probability density function, in which a and c are their means and b and d are their variances. """ kernel = models.Gaussian1D(1 / math.sqrt(2 * np.pi), 1, 1) model = models.Gaussian1D(1 / math.sqrt(2 * np.pi), 3, 1) model_conv = convolve_models(model, kernel, mode=mode, normalize_kernel=False) ans = models.Gaussian1D(1 / (2 * math.sqrt(np.pi)), 4, np.sqrt(2)) x = np.arange(-5, 6) assert_allclose(ans(x), model_conv(x), atol=1e-3) @pytest.mark.parametrize("mode", ["convolve_fft", "convolve"]) def test_convolve_box_models(self, mode): kernel = models.Box1D() model = models.Box1D() model_conv = convolve_models(model, kernel, mode=mode) x = np.linspace(-1, 1, 99) ans = (x + 1) * (x < 0) + (-x + 1) * (x >= 0) assert_allclose(ans, model_conv(x), atol=1e-3) @pytest.mark.parametrize("mode", ["convolve_fft", "convolve"]) @pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy") def test_fitting_convolve_models(self, mode): """ test that a convolve model can be fitted """ b1 = models.Box1D() g1 = models.Gaussian1D() x = np.linspace(-5, 5, 99) fake_model = models.Gaussian1D(amplitude=10) with NumpyRNGContext(123): fake_data = fake_model(x) + np.random.normal(size=len(x)) init_model = convolve_models(b1, g1, mode=mode, normalize_kernel=False) fitter = fitting.LevMarLSQFitter() fitted_model = fitter(init_model, x, fake_data) me = np.mean(fitted_model(x) - fake_data) assert_almost_equal(me, 0.0, decimal=2)
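# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite above): plain usage of
# convolve_models(), mirroring test_sum_of_gaussians. Convolving the unit-area
# Gaussians N(1, 1) and N(3, 1) gives N(4, 2), i.e. mean 4 and variance 2.
import math

import numpy as np

from astropy.convolution import convolve_models
from astropy.modeling import models

kernel = models.Gaussian1D(1 / math.sqrt(2 * np.pi), 1, 1)
model = models.Gaussian1D(1 / math.sqrt(2 * np.pi), 3, 1)
combined = convolve_models(model, kernel, normalize_kernel=False)

x = np.arange(-5, 6)
expected = models.Gaussian1D(1 / (2 * math.sqrt(np.pi)), 4, np.sqrt(2))
print(np.allclose(expected(x), combined(x), atol=1e-3))  # True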
c65a90d76e24c92d168a1eb53aa7d406d961256f2428c6a62fbf5a3fea4e3cbd
# Licensed under a 3-clause BSD style license - see LICENSE.rst import timeit # largest image size to use for "linear" and fft convolutions max_exponents_linear = {1: 15, 2: 7, 3: 5} max_exponents_fft = {1: 15, 2: 10, 3: 7} if __name__ == "__main__": for ndims in [1, 2, 3]: print( f"\n{ndims}-dimensional arrays ('n' is the size of the image AND " "the kernel)" ) print(" ".join(["%17s" % n for n in ("n", "convolve", "convolve_fft")])) for ii in range(3, max_exponents_fft[ndims]): # array = np.random.random([2**ii]*ndims) # test ODD sizes too if ii < max_exponents_fft[ndims]: setup = """ import numpy as np from astropy.convolution.convolve import convolve from astropy.convolution.convolve import convolve_fft array = np.random.random([%i]*%i) kernel = np.random.random([%i]*%i)""" % ( 2**ii - 1, ndims, 2**ii - 1, ndims, ) print("%16i:" % (int(2**ii - 1)), end=" ") if ii <= max_exponents_linear[ndims]: for convolve_type, extra in zip( ("", "_fft"), ("", "fft_pad=False") ): statement = ( f"convolve{convolve_type}(array, kernel, " f"boundary='fill', {extra})" ) besttime = min( timeit.Timer(stmt=statement, setup=setup).repeat(3, 10) ) print(f"{besttime:17f}", end=" ") else: print("%17s" % "skipped", end=" ") statement = "convolve_fft(array, kernel, boundary='fill')" besttime = min( timeit.Timer(stmt=statement, setup=setup).repeat(3, 10) ) print(f"{besttime:17f}", end=" ") print() setup = """ import numpy as np from astropy.convolution.convolve import convolve from astropy.convolution.convolve import convolve_fft array = np.random.random([%i]*%i) kernel = np.random.random([%i]*%i)""" % ( 2**ii, ndims, 2**ii, ndims, ) print("%16i:" % (int(2**ii)), end=" ") if ii <= max_exponents_linear[ndims]: for convolve_type in ( "", "_fft", ): # convolve doesn't allow even-sized kernels if convolve_type == "": print("%17s" % "-", end=" ") else: statement = ( f"convolve{convolve_type}(array, kernel, boundary='fill')" ) besttime = min( timeit.Timer(stmt=statement, setup=setup).repeat(3, 10) ) print(f"{besttime:17f}", end=" ") else: print("%17s" % "skipped", end=" ") statement = "convolve_fft(array, kernel, boundary='fill')" besttime = min(timeit.Timer(stmt=statement, setup=setup).repeat(3, 10)) print(f"{besttime:17f}", end=" ") print() """ Unfortunately, these tests are pretty strongly inconclusive NOTE: Runtime has units seconds and represents wall clock time. 
RESULTS on a late 2013 Mac Pro: 3.5 GHz 6-Core Intel Xeon E5 32 GB 1866 MHz DDR3 ECC Python 3.5.4 :: Anaconda custom (x86_64) clang version 6.0.0 (tags/RELEASE_600/final) llvm-opnemp r327556 | grokos | 2018-03-14 15:11:36 -0400 (Wed, 14 Mar 2018) With OpenMP (hyperthreaded 12procs), convolve() only: 1-dimensional arrays ('n' is the size of the image AND the kernel) n convolve convolve_fft 7: 0.002895 0.007321 15: 0.002684 0.008028 31: 0.002733 0.008684 63: 0.002728 0.009127 127: 0.002851 0.012659 255: 0.002835 0.010550 511: 0.003051 0.017137 1023: 0.004042 0.019384 2047: 0.007371 0.049246 4095: 0.021903 0.039821 8191: 0.067098 8.335749 16383: 0.256072 0.272165 2-dimensional arrays ('n' is the size of the image AND the kernel) n convolve convolve_fft 7: 0.002696 0.014745 15: 0.002839 0.014826 31: 0.004286 0.045167 63: 0.022941 0.063715 127: 0.325557 0.925577 255: skipped 0.694621 511: skipped 3.734946 3-dimensional arrays ('n' is the size of the image AND the kernel) n convolve convolve_fft 7: 0.003502 0.033121 8: 0.003407 0.030351 15: 0.026338 0.062235 31: 1.239503 1.586930 63: skipped 10.792675 With OpenMP but single threaded (n_threads = 1), convolve() only: 1-dimensional arrays ('n' is the size of the image AND the kernel) n convolve convolve_fft 7: 0.001754 0.004687 15: 0.001706 0.005133 31: 0.001744 0.005381 63: 0.001725 0.005582 127: 0.001801 0.007405 255: 0.002262 0.006528 511: 0.003866 0.009913 1023: 0.009820 0.011511 2047: 0.034707 0.028171 4095: 0.132908 0.024133 8191: 0.527692 8.311933 16383: 2.103046 0.269368 2-dimensional arrays ('n' is the size of the image AND the kernel) n convolve convolve_fft 7: 0.001734 0.009458 15: 0.002336 0.010310 31: 0.009123 0.025427 63: 0.126701 0.040610 127: 2.126114 0.926549 255: skipped 0.690896 511: skipped 3.756475 3-dimensional arrays ('n' is the size of the image AND the kernel) n convolve convolve_fft 7: 0.002822 0.019498 15: 0.096008 0.063744 31: 7.373533 1.578913 63: skipped 10.811530 RESULTS on a 2011 Mac Air: 1-dimensional arrays ('n' is the size of the image AND the kernel) n convolve convolve_fftnp convolve_fftw convolve_fftsp 7: 0.000408 0.002334 0.005571 0.002677 15: 0.000361 0.002491 0.005648 0.002678 31: 0.000535 0.002450 0.005988 0.002880 63: 0.000509 0.002876 0.008003 0.002981 127: 0.000801 0.004080 0.008513 0.003932 255: 0.002453 0.003111 0.007518 0.003564 511: 0.008394 0.006224 0.010247 0.005991 1023: 0.028741 0.007538 0.009591 0.007696 2047: 0.106323 0.021575 0.022041 0.020682 4095: 0.411936 0.021675 0.019761 0.020939 8191: 1.664517 8.278320 0.073001 7.803563 16383: 6.654678 0.251661 0.202271 0.222171 2-dimensional arrays ('n' is the size of the image AND the kernel) n convolve convolve_fftnp convolve_fftw convolve_fftsp 7: 0.000552 0.003524 0.006667 0.004318 15: 0.002986 0.005093 0.012941 0.005951 31: 0.074360 0.033973 0.031800 0.036937 63: 0.848471 0.057407 0.052192 0.053213 127: 14.656414 1.005329 0.402113 0.955279 255: skipped 1.715546 1.566876 1.745338 511: skipped 4.066155 4.303350 3.930661 3-dimensional arrays ('n' is the size of the image AND the kernel) n convolve convolve_fftnp convolve_fftw convolve_fftsp 7: 0.009239 0.012957 0.011957 0.015997 15: 0.772434 0.075621 0.056711 0.079508 31: 62.824051 2.295193 1.189505 2.351136 63: skipped 11.250225 10.982726 10.585744 On a 2009 Mac Pro: 1-dimensional arrays ('n' is the size of the image AND the kernel) n convolve convolve_fftnp convolve_fftw convolve_fftsp 7: 0.000360 0.002269 0.004986 0.002476 15: 0.000364 0.002255 0.005244 0.002471 31: 0.000385 0.002380 0.005422 
0.002588 63: 0.000474 0.002407 0.005392 0.002637 127: 0.000752 0.004122 0.007827 0.003966 255: 0.004316 0.003258 0.006566 0.003324 511: 0.011517 0.007158 0.009898 0.006238 1023: 0.034105 0.009211 0.009468 0.008260 2047: 0.113620 0.028097 0.020662 0.021603 4095: 0.403373 0.023211 0.018767 0.020065 8191: 1.519329 8.454573 0.211436 7.212381 16383: 5.887481 0.317428 0.153344 0.237119 2-dimensional arrays ('n' is the size of the image AND the kernel) n convolve convolve_fftnp convolve_fftw convolve_fftsp 7: 0.000474 0.003470 0.006131 0.003503 15: 0.002011 0.004481 0.007825 0.004496 31: 0.027291 0.019433 0.014841 0.018034 63: 0.445680 0.038171 0.026753 0.037404 127: 7.003774 0.925921 0.282591 0.762671 255: skipped 0.804682 0.708849 0.869368 511: skipped 3.643626 3.687562 4.584770 3-dimensional arrays ('n' is the size of the image AND the kernel) n convolve convolve_fftnp convolve_fftw convolve_fftsp 7: 0.004520 0.011519 0.009464 0.012335 15: 0.329566 0.060978 0.045495 0.073692 31: 24.935228 1.654920 0.710509 1.773879 63: skipped 8.982771 12.407683 16.900078 """
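# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the benchmark script above): how to
# reproduce a single entry of the timing tables, here the 1-D n=127 case,
# using the same best-of-3-repeats-of-10-calls measurement as the script.
import timeit

setup = """
import numpy as np
from astropy.convolution.convolve import convolve, convolve_fft
array = np.random.random(127)
kernel = np.random.random(127)
"""

for stmt in (
    "convolve(array, kernel, boundary='fill')",
    "convolve_fft(array, kernel, boundary='fill')",
):
    best = min(timeit.Timer(stmt=stmt, setup=setup).repeat(3, 10))
    print(f"{stmt}: {best:.6f} s")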
bf79cde679ae95f5a90df85863c3ecf7eaa54bc2c3b233101fd37a328e2f51be
# Licensed under a 3-clause BSD style license - see LICENSE.rst import io import os import subprocess import sys import pytest from astropy.config import configuration, create_config_file, paths, set_temp_config from astropy.utils.data import get_pkg_data_filename from astropy.utils.exceptions import AstropyDeprecationWarning OLD_CONFIG = {} def setup_module(): OLD_CONFIG.clear() OLD_CONFIG.update(configuration._cfgobjs) def teardown_module(): configuration._cfgobjs.clear() configuration._cfgobjs.update(OLD_CONFIG) def test_paths(): assert "astropy" in paths.get_config_dir() assert "astropy" in paths.get_cache_dir() assert "testpkg" in paths.get_config_dir(rootname="testpkg") assert "testpkg" in paths.get_cache_dir(rootname="testpkg") def test_set_temp_config(tmp_path, monkeypatch): # Check that we start in an understood state. assert configuration._cfgobjs == OLD_CONFIG # Temporarily remove any temporary overrides of the configuration dir. monkeypatch.setattr(paths.set_temp_config, "_temp_path", None) orig_config_dir = paths.get_config_dir(rootname="astropy") (temp_config_dir := tmp_path / "config").mkdir() temp_astropy_config = temp_config_dir / "astropy" # Test decorator mode @paths.set_temp_config(temp_config_dir) def test_func(): assert paths.get_config_dir(rootname="astropy") == str(temp_astropy_config) # Test temporary restoration of original default with paths.set_temp_config() as d: assert d == orig_config_dir == paths.get_config_dir(rootname="astropy") test_func() # Test context manager mode (with cleanup) with paths.set_temp_config(temp_config_dir, delete=True): assert paths.get_config_dir(rootname="astropy") == str(temp_astropy_config) assert not temp_config_dir.exists() # Check that we have returned to our old configuration. assert configuration._cfgobjs == OLD_CONFIG def test_set_temp_cache(tmp_path, monkeypatch): monkeypatch.setattr(paths.set_temp_cache, "_temp_path", None) orig_cache_dir = paths.get_cache_dir(rootname="astropy") (temp_cache_dir := tmp_path / "cache").mkdir() temp_astropy_cache = temp_cache_dir / "astropy" # Test decorator mode @paths.set_temp_cache(temp_cache_dir) def test_func(): assert paths.get_cache_dir(rootname="astropy") == str(temp_astropy_cache) # Test temporary restoration of original default with paths.set_temp_cache() as d: assert d == orig_cache_dir == paths.get_cache_dir(rootname="astropy") test_func() # Test context manager mode (with cleanup) with paths.set_temp_cache(temp_cache_dir, delete=True): assert paths.get_cache_dir(rootname="astropy") == str(temp_astropy_cache) assert not temp_cache_dir.exists() def test_set_temp_cache_resets_on_exception(tmp_path): """Test for regression of bug #9704""" t = paths.get_cache_dir() (a := tmp_path / "a").write_text("not a good cache\n") with pytest.raises(OSError), paths.set_temp_cache(a): pass assert t == paths.get_cache_dir() def test_config_file(): from astropy.config.configuration import get_config, reload_config apycfg = get_config("astropy") assert apycfg.filename.endswith("astropy.cfg") cfgsec = get_config("astropy.config") assert cfgsec.depth == 1 assert cfgsec.name == "config" assert cfgsec.parent.filename.endswith("astropy.cfg") # try with a different package name, still inside astropy config dir: testcfg = get_config("testpkg", rootname="astropy") parts = os.path.normpath(testcfg.filename).split(os.sep) assert ".astropy" in parts or "astropy" in parts assert parts[-1] == "testpkg.cfg" configuration._cfgobjs["testpkg"] = None # HACK # try with a different package name, no specified root name 
(should # default to astropy): testcfg = get_config("testpkg") parts = os.path.normpath(testcfg.filename).split(os.sep) assert ".astropy" in parts or "astropy" in parts assert parts[-1] == "testpkg.cfg" configuration._cfgobjs["testpkg"] = None # HACK # try with a different package name, specified root name: testcfg = get_config("testpkg", rootname="testpkg") parts = os.path.normpath(testcfg.filename).split(os.sep) assert ".testpkg" in parts or "testpkg" in parts assert parts[-1] == "testpkg.cfg" configuration._cfgobjs["testpkg"] = None # HACK # try with a subpackage with specified root name: testcfg_sec = get_config("testpkg.somemodule", rootname="testpkg") parts = os.path.normpath(testcfg_sec.parent.filename).split(os.sep) assert ".testpkg" in parts or "testpkg" in parts assert parts[-1] == "testpkg.cfg" configuration._cfgobjs["testpkg"] = None # HACK reload_config("astropy") def check_config(conf): # test that the output contains some lines that we expect assert "# unicode_output = False" in conf assert "[io.fits]" in conf assert "[table]" in conf assert "# replace_warnings = ," in conf assert "[table.jsviewer]" in conf assert "# css_urls = https://cdn.datatables.net/1.10.12/css/jquery.dataTables.css," in conf # fmt: skip assert "[visualization.wcsaxes]" in conf assert "## Whether to log exceptions before raising them." in conf assert "# log_exceptions = False" in conf def test_generate_config(tmp_path): from astropy.config.configuration import generate_config out = io.StringIO() generate_config("astropy", out) conf = out.getvalue() outfile = tmp_path / "astropy.cfg" generate_config("astropy", outfile) with open(outfile) as fp: conf2 = fp.read() for c in (conf, conf2): check_config(c) def test_generate_config2(tmp_path): """Test that generate_config works with the default filename.""" with set_temp_config(tmp_path): from astropy.config.configuration import generate_config generate_config("astropy") assert os.path.exists(tmp_path / "astropy" / "astropy.cfg") with open(tmp_path / "astropy" / "astropy.cfg") as fp: conf = fp.read() check_config(conf) def test_create_config_file(tmp_path, caplog): with set_temp_config(tmp_path): create_config_file("astropy") # check that the config file has been created assert ( "The configuration file has been successfully written" in caplog.records[0].message ) assert os.path.exists(tmp_path / "astropy" / "astropy.cfg") with open(tmp_path / "astropy" / "astropy.cfg") as fp: conf = fp.read() check_config(conf) caplog.clear() # now modify the config file conf = conf.replace("# unicode_output = False", "unicode_output = True") with open(tmp_path / "astropy" / "astropy.cfg", mode="w") as fp: fp.write(conf) with set_temp_config(tmp_path): create_config_file("astropy") # check that the config file has not been overwritten since it was modified assert ( "The configuration file already exists and seems to have been customized" in caplog.records[0].message ) caplog.clear() with set_temp_config(tmp_path): create_config_file("astropy", overwrite=True) # check that the config file has been overwritten assert ( "The configuration file has been successfully written" in caplog.records[0].message ) def test_configitem(): from astropy.config.configuration import ConfigItem, ConfigNamespace, get_config ci = ConfigItem(34, "this is a Description") class Conf(ConfigNamespace): tstnm = ci conf = Conf() assert ci.module == "astropy.config.tests.test_configs" assert ci() == 34 assert ci.description == "this is a Description" assert conf.tstnm == 34 sec = get_config(ci.module) 
assert sec["tstnm"] == 34 ci.description = "updated Descr" ci.set(32) assert ci() == 32 # It's useful to go back to the default to allow other test functions to # call this one and still be in the default configuration. ci.description = "this is a Description" ci.set(34) assert ci() == 34 # Test iterator for one-item namespace result = [x for x in conf] assert result == ["tstnm"] result = [x for x in conf.keys()] assert result == ["tstnm"] result = [x for x in conf.values()] assert result == [ci] result = [x for x in conf.items()] assert result == [("tstnm", ci)] def test_configitem_types(): from astropy.config.configuration import ConfigItem, ConfigNamespace ci1 = ConfigItem(34) ci2 = ConfigItem(34.3) ci3 = ConfigItem(True) ci4 = ConfigItem("astring") class Conf(ConfigNamespace): tstnm1 = ci1 tstnm2 = ci2 tstnm3 = ci3 tstnm4 = ci4 conf = Conf() assert isinstance(conf.tstnm1, int) assert isinstance(conf.tstnm2, float) assert isinstance(conf.tstnm3, bool) assert isinstance(conf.tstnm4, str) with pytest.raises(TypeError): conf.tstnm1 = 34.3 conf.tstnm2 = 12 # this would should succeed as up-casting with pytest.raises(TypeError): conf.tstnm3 = "fasd" with pytest.raises(TypeError): conf.tstnm4 = 546.245 # Test iterator for multi-item namespace. Assume ordered by insertion order. item_names = [x for x in conf] assert item_names == ["tstnm1", "tstnm2", "tstnm3", "tstnm4"] result = [x for x in conf.keys()] assert result == item_names result = [x for x in conf.values()] assert result == [ci1, ci2, ci3, ci4] result = [x for x in conf.items()] assert result == [ ("tstnm1", ci1), ("tstnm2", ci2), ("tstnm3", ci3), ("tstnm4", ci4), ] def test_configitem_options(tmp_path): from astropy.config.configuration import ConfigItem, ConfigNamespace, get_config cio = ConfigItem(["op1", "op2", "op3"]) class Conf(ConfigNamespace): tstnmo = cio sec = get_config(cio.module) assert isinstance(cio(), str) assert cio() == "op1" assert sec["tstnmo"] == "op1" cio.set("op2") with pytest.raises(TypeError): cio.set("op5") assert sec["tstnmo"] == "op2" # now try saving apycfg = sec while apycfg.parent is not apycfg: apycfg = apycfg.parent f = tmp_path / "astropy.cfg" with open(f, "wb") as fd: apycfg.write(fd) with open(f, encoding="utf-8") as fd: lns = [x.strip() for x in fd.readlines()] assert "tstnmo = op2" in lns def test_config_noastropy_fallback(monkeypatch): """ Tests to make sure configuration items fall back to their defaults when there's a problem accessing the astropy directory """ # make sure the config directory is not searched monkeypatch.setenv("XDG_CONFIG_HOME", "foo") monkeypatch.delenv("XDG_CONFIG_HOME") monkeypatch.setattr(paths.set_temp_config, "_temp_path", None) # make sure the _find_or_create_root_dir function fails as though the # astropy dir could not be accessed def osraiser(dirnm, linkto, pkgname=None): raise OSError monkeypatch.setattr(paths, "_find_or_create_root_dir", osraiser) # also have to make sure the stored configuration objects are cleared monkeypatch.setattr(configuration, "_cfgobjs", {}) with pytest.raises(OSError): # make sure the config dir search fails paths.get_config_dir(rootname="astropy") # now run the basic tests, and make sure the warning about no astropy # is present test_configitem() def test_configitem_setters(): from astropy.config.configuration import ConfigItem, ConfigNamespace class Conf(ConfigNamespace): tstnm12 = ConfigItem(42, "this is another Description") conf = Conf() assert conf.tstnm12 == 42 with conf.set_temp("tstnm12", 45): assert conf.tstnm12 == 45 assert 
conf.tstnm12 == 42 conf.tstnm12 = 43 assert conf.tstnm12 == 43 with conf.set_temp("tstnm12", 46): assert conf.tstnm12 == 46 # Make sure it is reset even with Exception try: with conf.set_temp("tstnm12", 47): raise Exception except Exception: pass assert conf.tstnm12 == 43 def test_empty_config_file(): from astropy.config.configuration import is_unedited_config_file def get_content(fn): with open(get_pkg_data_filename(fn), encoding="latin-1") as fd: return fd.read() content = get_content("data/empty.cfg") assert is_unedited_config_file(content) content = get_content("data/not_empty.cfg") assert not is_unedited_config_file(content) class TestAliasRead: def setup_class(self): configuration._override_config_file = get_pkg_data_filename("data/alias.cfg") def test_alias_read(self): from astropy.utils.data import conf with pytest.warns( AstropyDeprecationWarning, match=r"Config parameter 'name_resolve_timeout' in section " r"\[coordinates.name_resolve\].*", ) as w: conf.reload() assert conf.remote_timeout == 42 assert len(w) == 1 def teardown_class(self): from astropy.utils.data import conf configuration._override_config_file = None conf.reload() def test_configitem_unicode(): from astropy.config.configuration import ConfigItem, ConfigNamespace, get_config cio = ConfigItem("ასტრონომიის") class Conf(ConfigNamespace): tstunicode = cio sec = get_config(cio.module) assert isinstance(cio(), str) assert cio() == "ასტრონომიის" assert sec["tstunicode"] == "ასტრონომიის" def test_warning_move_to_top_level(): # Check that the warning about deprecation config items in the # file works. See #2514 from astropy import conf configuration._override_config_file = get_pkg_data_filename("data/deprecated.cfg") try: with pytest.warns(AstropyDeprecationWarning) as w: conf.reload() conf.max_lines assert len(w) == 1 finally: configuration._override_config_file = None conf.reload() def test_no_home(): # "import astropy" fails when neither $HOME or $XDG_CONFIG_HOME # are set. To test, we unset those environment variables for a # subprocess and try to import astropy. test_path = os.path.dirname(__file__) astropy_path = os.path.abspath(os.path.join(test_path, "..", "..", "..")) env = os.environ.copy() paths = [astropy_path] if env.get("PYTHONPATH"): paths.append(env.get("PYTHONPATH")) env["PYTHONPATH"] = os.pathsep.join(paths) for val in ["HOME", "XDG_CONFIG_HOME"]: if val in env: del env[val] retcode = subprocess.check_call([sys.executable, "-c", "import astropy"], env=env) assert retcode == 0
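# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite above): the ConfigItem /
# ConfigNamespace pattern these tests exercise, as a standalone snippet.
# The option names here are invented for illustration.
from astropy.config.configuration import ConfigItem, ConfigNamespace

class DemoConf(ConfigNamespace):
    use_color = ConfigItem(True, "whether to use colored output")
    max_rows = ConfigItem(25, "maximum number of rows to display")

demo = DemoConf()
print(demo.use_color, demo.max_rows)  # defaults: True 25

# set_temp restores the previous value on exit, even on exceptions
# (see test_configitem_setters above)
with demo.set_temp("max_rows", 100):
    print(demo.max_rows)  # 100
print(demo.max_rows)  # 25 again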
9034d0a934260dcade004d613260307f6e8907b7947b38d6622cf2116abbb301
# Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np import pytest from numpy.testing import assert_array_equal from astropy import units as u from astropy.coordinates import Angle from astropy.tests.helper import assert_quantity_allclose from astropy.uncertainty import distributions as ds from astropy.uncertainty.core import Distribution from astropy.utils import NumpyRNGContext from astropy.utils.compat.optional_deps import HAS_SCIPY if HAS_SCIPY: from scipy.stats import norm # pylint: disable=W0611 SMAD_FACTOR = 1 / norm.ppf(0.75) class TestInit: @classmethod def setup_class(self): self.rates = np.array([1, 5, 30, 400])[:, np.newaxis] self.parr = np.random.poisson(self.rates, (4, 1000)) self.parr_t = np.random.poisson(self.rates.squeeze(), (1000, 4)) def test_numpy_init(self): # Test that we can initialize directly from a Numpy array Distribution(self.parr) def test_numpy_init_T(self): Distribution(self.parr_t.T) def test_quantity_init(self): # Test that we can initialize directly from a Quantity pq = self.parr << u.ct pqd = Distribution(pq) assert isinstance(pqd, u.Quantity) assert isinstance(pqd, Distribution) assert isinstance(pqd.value, Distribution) assert_array_equal(pqd.value.distribution, self.parr) def test_quantity_init_T(self): # Test that we can initialize directly from a Quantity pq = self.parr_t << u.ct Distribution(pq.T) def test_quantity_init_with_distribution(self): # Test that we can initialize a Quantity from a Distribution. pd = Distribution(self.parr) qpd = pd << u.ct assert isinstance(qpd, u.Quantity) assert isinstance(qpd, Distribution) assert qpd.unit == u.ct assert_array_equal(qpd.value.distribution, pd.distribution.astype(float)) def test_init_scalar(): parr = np.random.poisson(np.array([1, 5, 30, 400])[:, np.newaxis], (4, 1000)) with pytest.raises( TypeError, match=r"Attempted to initialize a Distribution with a scalar" ): Distribution(parr.ravel()[0]) class TestDistributionStatistics: def setup_class(self): with NumpyRNGContext(12345): self.data = np.random.normal( np.array([1, 2, 3, 4])[:, np.newaxis], np.array([3, 2, 4, 5])[:, np.newaxis], (4, 10000), ) self.distr = Distribution(self.data * u.kpc) def test_shape(self): # Distribution shape assert self.distr.shape == (4,) assert self.distr.distribution.shape == (4, 10000) def test_size(self): # Total number of values assert self.distr.size == 4 assert self.distr.distribution.size == 40000 def test_n_samples(self): # Number of samples assert self.distr.n_samples == 10000 def test_n_distr(self): assert self.distr.shape == (4,) def test_pdf_mean(self): # Mean of each PDF expected = np.mean(self.data, axis=-1) * self.distr.unit pdf_mean = self.distr.pdf_mean() assert_quantity_allclose(pdf_mean, expected) assert_quantity_allclose(pdf_mean, [1, 2, 3, 4] * self.distr.unit, rtol=0.05) # make sure the right type comes out - should be a Quantity because it's # now a summary statistic assert not isinstance(pdf_mean, Distribution) assert isinstance(pdf_mean, u.Quantity) # Check with out argument. 
out = pdf_mean * 0.0 pdf_mean2 = self.distr.pdf_mean(out=out) assert pdf_mean2 is out assert np.all(pdf_mean2 == pdf_mean) def test_pdf_std(self): # Standard deviation of each PDF expected = np.std(self.data, axis=-1) * self.distr.unit pdf_std = self.distr.pdf_std() assert_quantity_allclose(pdf_std, expected) assert_quantity_allclose(pdf_std, [3, 2, 4, 5] * self.distr.unit, rtol=0.05) # make sure the right type comes out - should be a Quantity because it's # now a summary statistic assert not isinstance(pdf_std, Distribution) assert isinstance(pdf_std, u.Quantity) # Check with proper ddof, using out argument. out = pdf_std * 0.0 expected = np.std(self.data, axis=-1, ddof=1) * self.distr.unit pdf_std2 = self.distr.pdf_std(ddof=1, out=out) assert pdf_std2 is out assert np.all(pdf_std2 == expected) def test_pdf_var(self): # Variance of each PDF expected = np.var(self.data, axis=-1) * self.distr.unit**2 pdf_var = self.distr.pdf_var() assert_quantity_allclose(pdf_var, expected) assert_quantity_allclose( pdf_var, [9, 4, 16, 25] * self.distr.unit**2, rtol=0.1 ) # make sure the right type comes out - should be a Quantity because it's # now a summary statistic assert not isinstance(pdf_var, Distribution) assert isinstance(pdf_var, u.Quantity) # Check with proper ddof, using out argument. out = pdf_var * 0.0 expected = np.var(self.data, axis=-1, ddof=1) * self.distr.unit**2 pdf_var2 = self.distr.pdf_var(ddof=1, out=out) assert pdf_var2 is out assert np.all(pdf_var2 == expected) def test_pdf_median(self): # Median of each PDF expected = np.median(self.data, axis=-1) * self.distr.unit pdf_median = self.distr.pdf_median() assert_quantity_allclose(pdf_median, expected) assert_quantity_allclose(pdf_median, [1, 2, 3, 4] * self.distr.unit, rtol=0.1) # make sure the right type comes out - should be a Quantity because it's # now a summary statistic assert not isinstance(pdf_median, Distribution) assert isinstance(pdf_median, u.Quantity) # Check with out argument. out = pdf_median * 0.0 pdf_median2 = self.distr.pdf_median(out=out) assert pdf_median2 is out assert np.all(pdf_median2 == expected) @pytest.mark.skipif(not HAS_SCIPY, reason="no scipy") def test_pdf_mad_smad(self): # Median absolute deviation of each PDF median = np.median(self.data, axis=-1, keepdims=True) expected = np.median(np.abs(self.data - median), axis=-1) * self.distr.unit pdf_mad = self.distr.pdf_mad() assert_quantity_allclose(pdf_mad, expected) pdf_smad = self.distr.pdf_smad() assert_quantity_allclose(pdf_smad, pdf_mad * SMAD_FACTOR, rtol=1e-5) assert_quantity_allclose(pdf_smad, [3, 2, 4, 5] * self.distr.unit, rtol=0.05) # make sure the right type comes out - should be a Quantity because it's # now a summary statistic assert not isinstance(pdf_mad, Distribution) assert isinstance(pdf_mad, u.Quantity) assert not isinstance(pdf_smad, Distribution) assert isinstance(pdf_smad, u.Quantity) # Check out argument for smad (which checks mad too). 
out = pdf_smad * 0.0 pdf_smad2 = self.distr.pdf_smad(out=out) assert pdf_smad2 is out assert np.all(pdf_smad2 == pdf_smad) def test_percentile(self): expected = np.percentile(self.data, [10, 50, 90], axis=-1) * self.distr.unit percs = self.distr.pdf_percentiles([10, 50, 90]) assert_quantity_allclose(percs, expected) assert percs.shape == (3, 4) # make sure the right type comes out - should be a Quantity because it's # now a summary statistic assert not isinstance(percs, Distribution) assert isinstance(percs, u.Quantity) def test_add_quantity(self): distrplus = self.distr + [2000, 0, 0, 500] * u.pc expected = ( np.median(self.data, axis=-1) + np.array([2, 0, 0, 0.5]) ) * self.distr.unit assert_quantity_allclose(distrplus.pdf_median(), expected) expected = np.var(self.data, axis=-1) * self.distr.unit**2 assert_quantity_allclose(distrplus.pdf_var(), expected) def test_add_distribution(self): another_data = ( np.random.randn(4, 10000) * np.array([1000, 0.01, 80, 10])[:, np.newaxis] + np.array([2000, 0, 0, 500])[:, np.newaxis] ) # another_data is in pc, but main distr is in kpc another_distr = Distribution(another_data * u.pc) combined_distr = self.distr + another_distr expected = np.median(self.data + another_data / 1000, axis=-1) * self.distr.unit assert_quantity_allclose(combined_distr.pdf_median(), expected) expected = ( np.var(self.data + another_data / 1000, axis=-1) * self.distr.unit**2 ) assert_quantity_allclose(combined_distr.pdf_var(), expected) def test_helper_normal_samples(): centerq = [1, 5, 30, 400] * u.kpc with NumpyRNGContext(12345): n_dist = ds.normal(centerq, std=[0.2, 1.5, 4, 1] * u.kpc, n_samples=100) assert n_dist.distribution.shape == (4, 100) assert n_dist.shape == (4,) assert n_dist.unit == u.kpc assert np.all(n_dist.pdf_std() > 100 * u.pc) n_dist2 = ds.normal(centerq, std=[0.2, 1.5, 4, 1] * u.pc, n_samples=20000) assert n_dist2.distribution.shape == (4, 20000) assert n_dist2.shape == (4,) assert n_dist2.unit == u.kpc assert np.all(n_dist2.pdf_std() < 100 * u.pc) def test_helper_poisson_samples(): centerqcounts = [1, 5, 30, 400] * u.count with NumpyRNGContext(12345): p_dist = ds.poisson(centerqcounts, n_samples=100) assert p_dist.shape == (4,) assert p_dist.distribution.shape == (4, 100) assert p_dist.unit == u.count p_min = np.min(p_dist) assert isinstance(p_min, Distribution) assert p_min.shape == () assert np.all(p_min >= 0) assert np.all(np.abs(p_dist.pdf_mean() - centerqcounts) < centerqcounts) def test_helper_uniform_samples(): udist = ds.uniform(lower=[1, 2] * u.kpc, upper=[3, 4] * u.kpc, n_samples=1000) assert udist.shape == (2,) assert udist.distribution.shape == (2, 1000) assert np.all(np.min(udist.distribution, axis=-1) > [1, 2] * u.kpc) assert np.all(np.max(udist.distribution, axis=-1) < [3, 4] * u.kpc) # try the alternative creator udist = ds.uniform(center=[1, 3, 2] * u.pc, width=[5, 4, 3] * u.pc, n_samples=1000) assert udist.shape == (3,) assert udist.distribution.shape == (3, 1000) assert np.all(np.min(udist.distribution, axis=-1) > [-1.5, 1, 0.5] * u.pc) assert np.all(np.max(udist.distribution, axis=-1) < [3.5, 5, 3.5] * u.pc) def test_helper_normal_exact(): pytest.skip("distribution stretch goal not yet implemented") centerq = [1, 5, 30, 400] * u.kpc ds.normal(centerq, std=[0.2, 1.5, 4, 1] * u.kpc) ds.normal(centerq, var=[0.04, 2.25, 16, 1] * u.kpc**2) ds.normal(centerq, ivar=[25, 0.44444444, 0.625, 1] * u.kpc**-2) def test_helper_poisson_exact(): pytest.skip("distribution stretch goal not yet implemented") centerq = [1, 5, 30, 400] * u.one 
ds.poisson(centerq, n_samples=1000) with pytest.raises( u.UnitsError, match=r"Poisson distribution can only be computed for dimensionless quantities", ): centerq = [1, 5, 30, 400] * u.kpc ds.poisson(centerq, n_samples=1000) def test_reprs(): darr = np.arange(30).reshape(3, 10) distr = Distribution(darr * u.kpc) assert "n_samples=10" in repr(distr) assert "n_samples=10" in str(distr) assert r"n_{\rm samp}=10" in distr._repr_latex_() @pytest.mark.parametrize( "func, kws", [ (ds.normal, {"center": 0, "std": 2}), (ds.uniform, {"lower": 0, "upper": 2}), (ds.poisson, {"center": 2}), (ds.normal, {"center": 0 * u.count, "std": 2 * u.count}), (ds.uniform, {"lower": 0 * u.count, "upper": 2 * u.count}), (ds.poisson, {"center": 2 * u.count}), ], ) def test_wrong_kw_fails(func, kws): with pytest.raises(Exception): kw_temp = kws.copy() kw_temp["n_sample"] = 100 # note the missing "s" assert func(**kw_temp).n_samples == 100 kw_temp = kws.copy() kw_temp["n_samples"] = 100 assert func(**kw_temp).n_samples == 100 def test_index_assignment_quantity(): arr = np.random.randn(2, 1000) distr = Distribution(arr * u.kpc) d1q, d2q = distr assert isinstance(d1q, Distribution) assert isinstance(d2q, Distribution) ndistr = ds.normal(center=[1, 2] * u.kpc, std=[3, 4] * u.kpc, n_samples=1000) n1, n2 = ndistr assert isinstance(n1, ds.Distribution) assert isinstance(n2, ds.Distribution) def test_index_assignment_array(): arr = np.random.randn(2, 1000) distr = Distribution(arr) d1a, d2a = distr assert isinstance(d1a, Distribution) assert isinstance(d2a, Distribution) ndistr = ds.normal(center=[1, 2], std=[3, 4], n_samples=1000) n1, n2 = ndistr assert isinstance(n1, ds.Distribution) assert isinstance(n2, ds.Distribution) def test_histogram(): arr = np.random.randn(2, 3, 1000) distr = Distribution(arr) hist, bins = distr.pdf_histogram(bins=10) assert hist.shape == (2, 3, 10) assert bins.shape == (2, 3, 11) def test_array_repr_latex(): # as of this writing ndarray does not have a _repr_latex_, and this test # ensure distributions account for that. However, if in the future ndarray # gets a _repr_latex_, we can skip this. arr = np.random.randn(4, 1000) if hasattr(arr, "_repr_latex_"): pytest.skip("in this version of numpy, ndarray has a _repr_latex_") distr = Distribution(arr) assert distr._repr_latex_() is None def test_distr_to(): distr = ds.normal(10 * u.cm, n_samples=100, std=1 * u.cm) todistr = distr.to(u.m) assert_quantity_allclose(distr.pdf_mean().to(u.m), todistr.pdf_mean()) def test_distr_noq_to(): # this is an array distribution not a quantity distr = ds.normal(10, n_samples=100, std=1) with pytest.raises(AttributeError): distr.to(u.m) def test_distr_to_value(): distr = ds.normal(10 * u.cm, n_samples=100, std=1 * u.cm) tovdistr = distr.to_value(u.m) assert np.allclose(distr.pdf_mean().to_value(u.m), tovdistr.pdf_mean()) def test_distr_noq_to_value(): distr = ds.normal(10, n_samples=100, std=1) with pytest.raises(AttributeError): distr.to_value(u.m) def test_distr_angle(): # Check that Quantity subclasses decay to Quantity appropriately. 
distr = Distribution([2.0, 3.0, 4.0]) ad = Angle(distr, "deg") ad_plus_ad = ad + ad assert isinstance(ad_plus_ad, Angle) assert isinstance(ad_plus_ad, Distribution) ad_times_ad = ad * ad assert not isinstance(ad_times_ad, Angle) assert isinstance(ad_times_ad, u.Quantity) assert isinstance(ad_times_ad, Distribution) ad += ad assert isinstance(ad, Angle) assert isinstance(ad, Distribution) assert_array_equal(ad.distribution, ad_plus_ad.distribution) with pytest.raises(u.UnitTypeError): ad *= ad def test_distr_angle_view_as_quantity(): # Check that Quantity subclasses decay to Quantity appropriately. distr = Distribution([2.0, 3.0, 4.0]) ad = Angle(distr, "deg") qd = ad.view(u.Quantity) assert not isinstance(qd, Angle) assert isinstance(qd, u.Quantity) assert isinstance(qd, Distribution) # View directly as DistributionQuantity class. qd2 = ad.view(qd.__class__) assert not isinstance(qd2, Angle) assert isinstance(qd2, u.Quantity) assert isinstance(qd2, Distribution) assert_array_equal(qd2.distribution, qd.distribution) qd3 = ad.view(qd.dtype, qd.__class__) assert not isinstance(qd3, Angle) assert isinstance(qd3, u.Quantity) assert isinstance(qd3, Distribution) assert_array_equal(qd3.distribution, qd.distribution) def test_distr_cannot_view_new_dtype(): # A Distribution has a very specific structured dtype with just one # element that holds the array of samples. As it is not clear what # to do with a view as a new dtype, we just error on it. # TODO: with a lot of thought, this restriction can likely be relaxed. distr = Distribution([2.0, 3.0, 4.0]) with pytest.raises(ValueError, match="with a new dtype"): distr.view(np.dtype("f8")) # Check subclass just in case. ad = Angle(distr, "deg") with pytest.raises(ValueError, match="with a new dtype"): ad.view(np.dtype("f8")) with pytest.raises(ValueError, match="with a new dtype"): ad.view(np.dtype("f8"), Distribution) def test_scalar_quantity_distribution(): # Regression test for gh-12336 angles = Distribution([90.0, 30.0, 0.0] * u.deg) sin_angles = np.sin(angles) # This failed in 4.3. assert isinstance(sin_angles, Distribution) assert isinstance(sin_angles, u.Quantity) assert_array_equal(sin_angles, Distribution(np.sin([90.0, 30.0, 0.0] * u.deg)))
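# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite above): the core
# Distribution workflow these tests cover - build a Monte Carlo distribution,
# propagate it through arithmetic, and summarize it with the pdf_* methods.
import astropy.units as u
from astropy.uncertainty import distributions as ds

length = ds.normal(10 * u.cm, std=1 * u.cm, n_samples=1000)
print(length.shape)      # () - a scalar quantity backed by 1000 samples
print(length.n_samples)  # 1000
print(length.pdf_mean(), length.pdf_std())  # ~10 cm, ~1 cm

# arithmetic propagates sample-by-sample, so uncertainties combine naturally
area = length * length
print(area.pdf_median())  # ~100 cm**2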
4e3a09da0afc4106d5a51817beabbecb613e0e7c548713ef3df53b3fe5214b88
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Module to test statistic functions """ import numpy as np # pylint: disable=invalid-name import pytest from numpy.testing import assert_almost_equal from astropy.modeling.models import Identity, Mapping from astropy.modeling.statistic import ( leastsquare, leastsquare_1d, leastsquare_2d, leastsquare_3d, ) class TestLeastSquare_XD: """Tests for leastsquare with pre-specified number of dimensions.""" @classmethod def setup_class(cls): cls.model1D = Identity(n_inputs=1) cls.model2D = Identity(n_inputs=2) | Mapping((0,), n_inputs=2) cls.model3D = Identity(n_inputs=3) | Mapping((0,), n_inputs=3) cls.data = cls.x = cls.y = cls.z = np.linspace(0, 10, num=100) cls.lsq_exp = 0 def test_1d_no_weights(self): lsq = leastsquare_1d(self.data, self.model1D, None, self.x) assert_almost_equal(lsq, self.lsq_exp) def test_1d_with_weights(self): lsq = leastsquare_1d(self.data, self.model1D, np.ones(100), self.x) assert_almost_equal(lsq, self.lsq_exp) def test_2d_no_weights(self): lsq = leastsquare_2d(self.data, self.model2D, None, self.x, self.y) assert_almost_equal(lsq, self.lsq_exp) def test_2d_with_weights(self): lsq = leastsquare_2d(self.data, self.model2D, np.ones(100), self.x, self.y) assert_almost_equal(lsq, self.lsq_exp) def test_3d_no_weights(self): lsq = leastsquare_3d(self.data, self.model3D, None, self.x, self.y, self.z) assert_almost_equal(lsq, self.lsq_exp) def test_3d_with_weights(self): lsq = leastsquare_3d( self.data, self.model3D, np.ones(100), self.x, self.y, self.z ) assert_almost_equal(lsq, self.lsq_exp) class TestLeastSquare_ND: """Tests for leastsquare.""" @classmethod def setup_class(cls): cls.model1D = Identity(n_inputs=1) cls.model3D = Identity(n_inputs=3) | Mapping((0,), n_inputs=3) cls.data = cls.x = cls.y = cls.z = np.linspace(0, 10, num=100) cls.lsq_exp = 0 def test_1d_no_weights(self): lsq = leastsquare(self.data, self.model1D, None, self.x) assert_almost_equal(lsq, self.lsq_exp) def test_1d_with_weights(self): lsq = leastsquare(self.data, self.model1D, np.ones(100), self.x) assert_almost_equal(lsq, self.lsq_exp) def test_3d_no_weights(self): lsq = leastsquare(self.data, self.model3D, None, self.x, self.y, self.z) assert_almost_equal(lsq, self.lsq_exp) def test_3d_with_weights(self): lsq = leastsquare(self.data, self.model3D, np.ones(100), self.x, self.y, self.z) assert_almost_equal(lsq, self.lsq_exp) def test_shape_mismatch(self): MESSAGE = r"Shape mismatch between model .* and measured .*" with pytest.raises(ValueError, match=MESSAGE): leastsquare(0, self.model1D, None, self.x)
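# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite above): leastsquare()
# computes sum(weights**2 * (model(*x) - measured)**2), or the plain residual
# sum of squares when weights is None. A minimal 1-D example with a model
# that is deliberately offset from the data:
import numpy as np

from astropy.modeling.models import Const1D
from astropy.modeling.statistic import leastsquare

x = np.linspace(0, 10, num=100)
data = np.ones(100)           # every measurement is 1
model = Const1D(amplitude=0)  # the model predicts 0 everywhere

print(leastsquare(data, model, None, x))          # 100.0
print(leastsquare(data, model, np.ones(100), x))  # also 100.0 (unit weights)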
d90d2c729499b77af8b81183783c26720970d39791adbe98b138dd6693d22ce9
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Test sky projections defined in WCS Paper II""" # pylint: disable=invalid-name, no-member import os import unittest.mock as mk import numpy as np import pytest from numpy.testing import assert_allclose, assert_almost_equal from astropy import units as u from astropy import wcs from astropy.io import fits from astropy.modeling import projections from astropy.modeling.parameters import InputParameterError from astropy.tests.helper import assert_quantity_allclose from astropy.utils.data import get_pkg_data_filename def test_new_wcslib_projections(): # Test that we are aware of all WCSLIB projections. # Detect if a new WCSLIB release introduced new projections. assert not set(wcs.PRJ_CODES).symmetric_difference( projections.projcodes + projections._NOT_SUPPORTED_PROJ_CODES ) def test_Projection_properties(): projection = projections.Sky2Pix_PlateCarree() assert projection.n_inputs == 2 assert projection.n_outputs == 2 PIX_COORDINATES = [-10, 30] MAPS_DIR = os.path.join(os.pardir, os.pardir, "wcs", "tests", "data", "maps") pars = [(x,) for x in projections.projcodes] # There is no groundtruth file for the XPH projection available here: # https://www.atnf.csiro.au/people/mcalabre/WCS/example_data.html pars.remove(("XPH",)) @pytest.mark.parametrize(("code",), pars) def test_Sky2Pix(code): """Check astropy model eval against wcslib eval""" wcs_map = os.path.join(MAPS_DIR, f"1904-66_{code}.hdr") test_file = get_pkg_data_filename(wcs_map) header = fits.Header.fromfile(test_file, endcard=False, padding=False) params = [] for i in range(3): key = f"PV2_{i + 1}" if key in header: params.append(header[key]) w = wcs.WCS(header) w.wcs.crval = [0.0, 0.0] w.wcs.crpix = [0, 0] w.wcs.cdelt = [1, 1] wcslibout = w.wcs.p2s([PIX_COORDINATES], 1) wcs_pix = w.wcs.s2p(wcslibout["world"], 1)["pixcrd"] model = getattr(projections, "Sky2Pix_" + code) tinv = model(*params) x, y = tinv(wcslibout["phi"], wcslibout["theta"]) assert_almost_equal(np.asarray(x), wcs_pix[:, 0]) assert_almost_equal(np.asarray(y), wcs_pix[:, 1]) assert isinstance(tinv.prjprm, wcs.Prjprm) @pytest.mark.parametrize(("code",), pars) def test_Pix2Sky(code): """Check astropy model eval against wcslib eval""" wcs_map = os.path.join(MAPS_DIR, f"1904-66_{code}.hdr") test_file = get_pkg_data_filename(wcs_map) header = fits.Header.fromfile(test_file, endcard=False, padding=False) params = [] for i in range(3): key = f"PV2_{i + 1}" if key in header: params.append(header[key]) w = wcs.WCS(header) w.wcs.crval = [0.0, 0.0] w.wcs.crpix = [0, 0] w.wcs.cdelt = [1, 1] wcslibout = w.wcs.p2s([PIX_COORDINATES], 1) wcs_phi = wcslibout["phi"] wcs_theta = wcslibout["theta"] model = getattr(projections, "Pix2Sky_" + code) tanprj = model(*params) phi, theta = tanprj(*PIX_COORDINATES) assert_almost_equal(np.asarray(phi), wcs_phi) assert_almost_equal(np.asarray(theta), wcs_theta) @pytest.mark.parametrize(("code",), pars) def test_Sky2Pix_unit(code): """Check astropy model eval against wcslib eval""" wcs_map = os.path.join(MAPS_DIR, f"1904-66_{code}.hdr") test_file = get_pkg_data_filename(wcs_map) header = fits.Header.fromfile(test_file, endcard=False, padding=False) params = [] for i in range(3): key = f"PV2_{i + 1}" if key in header: params.append(header[key]) w = wcs.WCS(header) w.wcs.crval = [0.0, 0.0] w.wcs.crpix = [0, 0] w.wcs.cdelt = [1, 1] wcslibout = w.wcs.p2s([PIX_COORDINATES], 1) wcs_pix = w.wcs.s2p(wcslibout["world"], 1)["pixcrd"] model = getattr(projections, "Sky2Pix_" + code) tinv =
model(*params) x, y = tinv(wcslibout["phi"] * u.deg, wcslibout["theta"] * u.deg) assert_quantity_allclose(x, wcs_pix[:, 0] * u.deg) assert_quantity_allclose(y, wcs_pix[:, 1] * u.deg) @pytest.mark.parametrize(("code",), pars) def test_Pix2Sky_unit(code): """Check astropy model eval against wcslib eval""" wcs_map = os.path.join(MAPS_DIR, f"1904-66_{code}.hdr") test_file = get_pkg_data_filename(wcs_map) header = fits.Header.fromfile(test_file, endcard=False, padding=False) params = [] for i in range(3): key = f"PV2_{i + 1}" if key in header: params.append(header[key]) w = wcs.WCS(header) w.wcs.crval = [0.0, 0.0] w.wcs.crpix = [0, 0] w.wcs.cdelt = [1, 1] wcslibout = w.wcs.p2s([PIX_COORDINATES], 1) wcs_phi = wcslibout["phi"] wcs_theta = wcslibout["theta"] model = getattr(projections, "Pix2Sky_" + code) tanprj = model(*params) phi, theta = tanprj(*PIX_COORDINATES * u.deg) assert_quantity_allclose(phi, wcs_phi * u.deg) assert_quantity_allclose(theta, wcs_theta * u.deg) phi, theta = tanprj(*(PIX_COORDINATES * u.deg).to(u.rad)) assert_quantity_allclose(phi, wcs_phi * u.deg) assert_quantity_allclose(theta, wcs_theta * u.deg) phi, theta = tanprj(*(PIX_COORDINATES * u.deg).to(u.arcmin)) assert_quantity_allclose(phi, wcs_phi * u.deg) assert_quantity_allclose(theta, wcs_theta * u.deg) @pytest.mark.parametrize(("code",), pars) def test_projection_default(code): """Check astropy model eval with default parameters""" # Just makes sure that the default parameter values are reasonable # and accepted by wcslib. model = getattr(projections, "Sky2Pix_" + code) tinv = model() x, y = tinv(45, 45) model = getattr(projections, "Pix2Sky_" + code) tinv = model() x, y = tinv(0, 0) class TestZenithalPerspective: """Test Zenithal Perspective projection""" def setup_class(self): ID = "AZP" wcs_map = os.path.join(MAPS_DIR, f"1904-66_{ID}.hdr") test_file = get_pkg_data_filename(wcs_map) header = fits.Header.fromfile(test_file, endcard=False, padding=False) self.wazp = wcs.WCS(header) self.wazp.wcs.crpix = np.array([0.0, 0.0]) self.wazp.wcs.crval = np.array([0.0, 0.0]) self.wazp.wcs.cdelt = np.array([1.0, 1.0]) self.pv_kw = [kw[2] for kw in self.wazp.wcs.get_pv()] self.azp = projections.Pix2Sky_ZenithalPerspective(*self.pv_kw) def test_AZP_p2s(self): wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1) wcs_phi = wcslibout["phi"] wcs_theta = wcslibout["theta"] phi, theta = self.azp(-10, 30) assert_almost_equal(np.asarray(phi), wcs_phi) assert_almost_equal(np.asarray(theta), wcs_theta) def test_AZP_s2p(self): wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1) wcs_pix = self.wazp.wcs.s2p(wcslibout["world"], 1)["pixcrd"] x, y = self.azp.inverse(wcslibout["phi"], wcslibout["theta"]) assert_almost_equal(np.asarray(x), wcs_pix[:, 0]) assert_almost_equal(np.asarray(y), wcs_pix[:, 1]) def test_validate(self): MESSAGE = r"Zenithal perspective projection is not defined for mu = -1" with pytest.raises(InputParameterError, match=MESSAGE): projections.Pix2Sky_ZenithalPerspective(-1) with pytest.raises(InputParameterError, match=MESSAGE): projections.Sky2Pix_ZenithalPerspective(-1) with pytest.raises(InputParameterError, match=MESSAGE): projections.Pix2Sky_SlantZenithalPerspective(-1) with pytest.raises(InputParameterError, match=MESSAGE): projections.Sky2Pix_SlantZenithalPerspective(-1) class TestCylindricalPerspective: """Test cylindrical perspective projection""" def setup_class(self): ID = "CYP" wcs_map = os.path.join(MAPS_DIR, f"1904-66_{ID}.hdr") test_file = get_pkg_data_filename(wcs_map) header = fits.Header.fromfile(test_file, endcard=False, 
padding=False) self.wazp = wcs.WCS(header) self.wazp.wcs.crpix = np.array([0.0, 0.0]) self.wazp.wcs.crval = np.array([0.0, 0.0]) self.wazp.wcs.cdelt = np.array([1.0, 1.0]) self.pv_kw = [kw[2] for kw in self.wazp.wcs.get_pv()] self.azp = projections.Pix2Sky_CylindricalPerspective(*self.pv_kw) def test_CYP_p2s(self): wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1) wcs_phi = wcslibout["phi"] wcs_theta = wcslibout["theta"] phi, theta = self.azp(-10, 30) assert_almost_equal(np.asarray(phi), wcs_phi) assert_almost_equal(np.asarray(theta), wcs_theta) def test_CYP_s2p(self): wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1) wcs_pix = self.wazp.wcs.s2p(wcslibout["world"], 1)["pixcrd"] x, y = self.azp.inverse(wcslibout["phi"], wcslibout["theta"]) assert_almost_equal(np.asarray(x), wcs_pix[:, 0]) assert_almost_equal(np.asarray(y), wcs_pix[:, 1]) def test_validate(self): MESSAGE = r"CYP projection is not defined for .*" MESSAGE0 = r"CYP projection is not defined for mu = -lambda" MESSAGE1 = r"CYP projection is not defined for lambda = -mu" # Pix2Sky_CylindricalPerspective with pytest.raises(InputParameterError, match=MESSAGE): projections.Pix2Sky_CylindricalPerspective(1, -1) with pytest.raises(InputParameterError, match=MESSAGE): projections.Pix2Sky_CylindricalPerspective(-1, 1) model = projections.Pix2Sky_CylindricalPerspective() with pytest.raises(InputParameterError, match=MESSAGE0): model.mu = -1 with pytest.raises(InputParameterError, match=MESSAGE1): model.lam = -1 # Sky2Pix_CylindricalPerspective with pytest.raises(InputParameterError, match=MESSAGE): projections.Sky2Pix_CylindricalPerspective(1, -1) with pytest.raises(InputParameterError, match=MESSAGE): projections.Sky2Pix_CylindricalPerspective(-1, 1) model = projections.Sky2Pix_CylindricalPerspective() with pytest.raises(InputParameterError, match=MESSAGE0): model.mu = -1 with pytest.raises(InputParameterError, match=MESSAGE1): model.lam = -1 def test_AffineTransformation2D(): # Simple test with a scale and translation model = projections.AffineTransformation2D( matrix=[[2, 0], [0, 2]], translation=[1, 1] ) # Coordinates for vertices of a rectangle rect = [[0, 0], [1, 0], [0, 3], [1, 3]] x, y = zip(*rect) new_rect = np.vstack(model(x, y)).T assert np.all(new_rect == [[1, 1], [3, 1], [1, 7], [3, 7]]) # Matrix validation error MESSAGE = r"Expected transformation matrix to be a 2x2 array" with pytest.raises(InputParameterError, match=MESSAGE): model.matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] # Translation validation error MESSAGE = ( r"Expected translation vector to be a 2 element row or column vector array" ) with pytest.raises(InputParameterError, match=MESSAGE): model.translation = [1, 2, 3] with pytest.raises(InputParameterError, match=MESSAGE): model.translation = [[1], [2]] with pytest.raises(InputParameterError, match=MESSAGE): model.translation = [[1, 2, 3]] # Incompatible shape error a = np.array([[1], [2], [3], [4]]) b = a.ravel() with mk.patch.object(np, "vstack", autospec=True, side_effect=[a, b]) as mk_vstack: MESSAGE = r"Incompatible input shapes" with pytest.raises(ValueError, match=MESSAGE): model(x, y) with pytest.raises(ValueError, match=MESSAGE): model(x, y) assert mk_vstack.call_count == 2 # Input shape evaluation error x = np.array([1, 2]) y = np.array([1, 2, 3]) MESSAGE = r"Expected input arrays to have the same shape" with pytest.raises(ValueError, match=MESSAGE): model.evaluate(x, y, model.matrix, model.translation) def test_AffineTransformation2D_inverse(): # Test non-invertible model model1 = 
projections.AffineTransformation2D(matrix=[[1, 1], [1, 1]]) MESSAGE = r"Transformation matrix is singular; .* model does not have an inverse" with pytest.raises(InputParameterError, match=MESSAGE): model1.inverse model2 = projections.AffineTransformation2D( matrix=[[1.2, 3.4], [5.6, 7.8]], translation=[9.1, 10.11] ) # Coordinates for vertices of a rectangle rect = [[0, 0], [1, 0], [0, 3], [1, 3]] x, y = zip(*rect) x_new, y_new = model2.inverse(*model2(x, y)) assert_allclose([x, y], [x_new, y_new], atol=1e-10) model3 = projections.AffineTransformation2D( matrix=[[1.2, 3.4], [5.6, 7.8]] * u.m, translation=[9.1, 10.11] * u.m ) x_new, y_new = model3.inverse(*model3(x * u.m, y * u.m)) assert_allclose([x, y], [x_new, y_new], atol=1e-10) model4 = projections.AffineTransformation2D( matrix=[[1.2, 3.4], [5.6, 7.8]] * u.m, translation=[9.1, 10.11] * u.km ) MESSAGE = r"matrix and translation must have the same units" with pytest.raises(ValueError, match=MESSAGE): model4.inverse(*model4(x * u.m, y * u.m)) def test_c_projection_striding(): # This is just a simple test to make sure that the striding is # handled correctly in the projection C extension coords = np.arange(10).reshape((5, 2)) model = projections.Sky2Pix_ZenithalPerspective(2, 30) phi, theta = model(coords[:, 0], coords[:, 1]) assert_almost_equal(phi, [0.0, 2.2790416, 4.4889294, 6.6250643, 8.68301]) assert_almost_equal( theta, [-76.4816918, -75.3594654, -74.1256332, -72.784558, -71.3406629] ) def test_c_projections_shaped(): nx, ny = (5, 2) x = np.linspace(0, 1, nx) y = np.linspace(0, 1, ny) xv, yv = np.meshgrid(x, y) model = projections.Pix2Sky_TAN() phi, theta = model(xv, yv) assert_allclose( phi, [ [0.0, 90.0, 90.0, 90.0, 90.0], [180.0, 165.96375653, 153.43494882, 143.13010235, 135.0], ], ) assert_allclose( theta, [ [90.0, 89.75000159, 89.50001269, 89.25004283, 89.00010152], [89.00010152, 88.96933478, 88.88210788, 88.75019826, 88.58607353], ], ) def test_affine_with_quantities(): x = 1 y = 2 xdeg = (x * u.pix).to(u.deg, equivalencies=u.pixel_scale(2.5 * u.deg / u.pix)) ydeg = (y * u.pix).to(u.deg, equivalencies=u.pixel_scale(2.5 * u.deg / u.pix)) xpix = x * u.pix ypix = y * u.pix # test affine with matrix only qaff = projections.AffineTransformation2D(matrix=[[1, 2], [2, 1]] * u.deg) MESSAGE = ( r"To use AffineTransformation with quantities, both matrix and unit need to be" r" quantities" ) with pytest.raises(ValueError, match=MESSAGE): qx1, qy1 = qaff( xpix, ypix, equivalencies={ "x": u.pixel_scale(2.5 * u.deg / u.pix), "y": u.pixel_scale(2.5 * u.deg / u.pix), }, ) # test affine with matrix and translation qaff = projections.AffineTransformation2D( matrix=[[1, 2], [2, 1]] * u.deg, translation=[1, 2] * u.deg ) qx1, qy1 = qaff( xpix, ypix, equivalencies={ "x": u.pixel_scale(2.5 * u.deg / u.pix), "y": u.pixel_scale(2.5 * u.deg / u.pix), }, ) aff = projections.AffineTransformation2D( matrix=[[1, 2], [2, 1]], translation=[1, 2] ) x1, y1 = aff(xdeg.value, ydeg.value) assert_quantity_allclose(qx1, x1 * u.deg) assert_quantity_allclose(qy1, y1 * u.deg) # test the case of WCS PC and CDELT transformations pc = np.array( [ [0.86585778922708, 0.50029020461607], [-0.50029020461607, 0.86585778922708], ] ) cdelt = np.array( [ [1, 3.0683055555556e-05], [3.0966944444444e-05, 1], ] ) matrix = cdelt * pc qaff = projections.AffineTransformation2D( matrix=matrix * u.deg, translation=[0, 0] * u.deg ) inv_matrix = np.linalg.inv(matrix) inv_qaff = projections.AffineTransformation2D( matrix=inv_matrix * u.pix, translation=[0, 0] * u.pix ) qaff.inverse = inv_qaff 
qx1, qy1 = qaff( xpix, ypix, equivalencies={ "x": u.pixel_scale(1 * u.deg / u.pix), "y": u.pixel_scale(1 * u.deg / u.pix), }, ) x1, y1 = qaff.inverse( qx1, qy1, equivalencies={ "x": u.pixel_scale(1 * u.deg / u.pix), "y": u.pixel_scale(1 * u.deg / u.pix), }, ) assert_quantity_allclose(x1, xpix) assert_quantity_allclose(y1, ypix) def test_Pix2Sky_ZenithalPerspective_inverse(): model = projections.Pix2Sky_ZenithalPerspective(2, 30) inverse = model.inverse assert isinstance(inverse, projections.Sky2Pix_ZenithalPerspective) assert inverse.mu == model.mu == 2 assert_allclose(inverse.gamma, model.gamma) assert_allclose(inverse.gamma, 30) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Sky2Pix_ZenithalPerspective_inverse(): model = projections.Sky2Pix_ZenithalPerspective(2, 30) inverse = model.inverse assert isinstance(inverse, projections.Pix2Sky_AZP) assert inverse.mu == model.mu == 2 assert_allclose(inverse.gamma, model.gamma) assert_allclose(inverse.gamma, 30) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Pix2Sky_SlantZenithalPerspective_inverse(): model = projections.Pix2Sky_SlantZenithalPerspective(2, 30, 40) inverse = model.inverse assert isinstance(inverse, projections.Sky2Pix_SlantZenithalPerspective) assert inverse.mu == model.mu == 2 assert_allclose(inverse.phi0, model.phi0) assert_allclose(inverse.theta0, model.theta0) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Sky2Pix_SlantZenithalPerspective_inverse(): model = projections.Sky2Pix_SlantZenithalPerspective(2, 30, 40) inverse = model.inverse assert isinstance(inverse, projections.Pix2Sky_SlantZenithalPerspective) assert inverse.mu == model.mu == 2 assert_allclose(inverse.phi0, model.phi0) assert_allclose(inverse.theta0, model.theta0) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Pix2Sky_Gnomonic_inverse(): model = projections.Pix2Sky_Gnomonic() inverse = model.inverse assert isinstance(inverse, projections.Sky2Pix_Gnomonic) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Sky2Pix_Gnomonic_inverse(): model = projections.Sky2Pix_Gnomonic() inverse = model.inverse assert isinstance(inverse, projections.Pix2Sky_Gnomonic) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Pix2Sky_Stereographic_inverse(): model = projections.Pix2Sky_Stereographic() inverse = model.inverse assert isinstance(inverse, projections.Sky2Pix_Stereographic) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = 
inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Sky2Pix_Stereographic_inverse(): model = projections.Sky2Pix_Stereographic() inverse = model.inverse assert isinstance(inverse, projections.Pix2Sky_Stereographic) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Pix2Sky_SlantOrthographic_inverse(): model = projections.Pix2Sky_SlantOrthographic(2, 30) inverse = model.inverse assert isinstance(inverse, projections.Sky2Pix_SlantOrthographic) assert inverse.xi == model.xi == 2 assert inverse.eta == model.eta == 30 x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-8) assert_allclose(b, y, atol=1e-8) def test_Sky2Pix_SlantOrthographic_inverse(): model = projections.Sky2Pix_SlantOrthographic(2, 30) inverse = model.inverse assert isinstance(inverse, projections.Pix2Sky_SlantOrthographic) assert inverse.xi == model.xi == 2 assert inverse.eta == model.eta == 30 x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-8) assert_allclose(b, y, atol=1e-8) def test_Pix2Sky_ZenithalEquidistant_inverse(): model = projections.Pix2Sky_ZenithalEquidistant() inverse = model.inverse assert isinstance(inverse, projections.Sky2Pix_ZenithalEquidistant) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Sky2Pix_ZenithalEquidistant_inverse(): model = projections.Sky2Pix_ZenithalEquidistant() inverse = model.inverse assert isinstance(inverse, projections.Pix2Sky_ZenithalEquidistant) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Pix2Sky_ZenithalEqualArea_inverse(): model = projections.Pix2Sky_ZenithalEqualArea() inverse = model.inverse assert isinstance(inverse, projections.Sky2Pix_ZenithalEqualArea) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Sky2Pix_ZenithalEqualArea_inverse(): model = projections.Sky2Pix_ZenithalEqualArea() inverse = model.inverse assert isinstance(inverse, projections.Pix2Sky_ZenithalEqualArea) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Pix2Sky_Airy_inverse(): model = projections.Pix2Sky_Airy(30) inverse = model.inverse assert isinstance(inverse, projections.Sky2Pix_Airy) assert inverse.theta_b == model.theta_b == 30 x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Sky2Pix_Airy_inverse(): model = 
projections.Sky2Pix_Airy(30) inverse = model.inverse assert isinstance(inverse, projections.Pix2Sky_Airy) assert inverse.theta_b == model.theta_b == 30 x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Pix2Sky_CylindricalPerspective_inverse(): model = projections.Pix2Sky_CylindricalPerspective(2, 30) inverse = model.inverse assert isinstance(inverse, projections.Sky2Pix_CylindricalPerspective) assert inverse.mu == model.mu == 2 assert inverse.lam == model.lam == 30 x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Sky2Pix_CylindricalPerspective_inverse(): model = projections.Sky2Pix_CylindricalPerspective(2, 30) inverse = model.inverse assert isinstance(inverse, projections.Pix2Sky_CylindricalPerspective) assert inverse.mu == model.mu == 2 assert inverse.lam == model.lam == 30 x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Pix2Sky_CylindricalEqualArea_inverse(): model = projections.Pix2Sky_CylindricalEqualArea(0.567) inverse = model.inverse assert isinstance(inverse, projections.Sky2Pix_CylindricalEqualArea) assert inverse.lam == model.lam == 0.567 def test_Sky2Pix_CylindricalEqualArea_inverse(): model = projections.Sky2Pix_CylindricalEqualArea(0.765) inverse = model.inverse assert isinstance(inverse, projections.Pix2Sky_CylindricalEqualArea) assert inverse.lam == model.lam == 0.765 def test_Pix2Sky_PlateCarree_inverse(): model = projections.Pix2Sky_PlateCarree() inverse = model.inverse assert isinstance(inverse, projections.Sky2Pix_PlateCarree) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Sky2Pix_PlateCarree_inverse(): model = projections.Sky2Pix_PlateCarree() inverse = model.inverse assert isinstance(inverse, projections.Pix2Sky_PlateCarree) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Pix2Sky_Mercator_inverse(): model = projections.Pix2Sky_Mercator() inverse = model.inverse assert isinstance(inverse, projections.Sky2Pix_Mercator) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Sky2Pix_Mercator_inverse(): model = projections.Sky2Pix_Mercator() inverse = model.inverse assert isinstance(inverse, projections.Pix2Sky_Mercator) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, 
atol=1e-12) def test_Pix2Sky_SansonFlamsteed_inverse(): model = projections.Pix2Sky_SansonFlamsteed() inverse = model.inverse assert isinstance(inverse, projections.Sky2Pix_SansonFlamsteed) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Sky2Pix_SansonFlamsteed_inverse(): model = projections.Sky2Pix_SansonFlamsteed() inverse = model.inverse assert isinstance(inverse, projections.Pix2Sky_SansonFlamsteed) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Pix2Sky_Parabolic_inverse(): model = projections.Pix2Sky_Parabolic() inverse = model.inverse assert isinstance(inverse, projections.Sky2Pix_Parabolic) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Sky2Pix_Parabolic_inverse(): model = projections.Sky2Pix_Parabolic() inverse = model.inverse assert isinstance(inverse, projections.Pix2Sky_Parabolic) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Pix2Sky_Molleweide_inverse(): model = projections.Pix2Sky_Molleweide() inverse = model.inverse assert isinstance(inverse, projections.Sky2Pix_Molleweide) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Sky2Pix_Molleweide_inverse(): model = projections.Sky2Pix_Molleweide() inverse = model.inverse assert isinstance(inverse, projections.Pix2Sky_Molleweide) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Pix2Sky_HammerAitoff_inverse(): model = projections.Pix2Sky_HammerAitoff() inverse = model.inverse assert isinstance(inverse, projections.Sky2Pix_HammerAitoff) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Sky2Pix_HammerAitoff_inverse(): model = projections.Sky2Pix_HammerAitoff() inverse = model.inverse assert isinstance(inverse, projections.Pix2Sky_HammerAitoff) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Pix2Sky_ConicPerspective_inverse(): model = projections.Pix2Sky_ConicPerspective(2, 30) inverse = model.inverse assert isinstance(inverse, projections.Sky2Pix_ConicPerspective) assert inverse.sigma == 
model.sigma == 2 assert_allclose(inverse.delta, model.delta) assert_allclose(inverse.delta, 30) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Sky2Pix_ConicPerspective_inverse(): model = projections.Sky2Pix_ConicPerspective(2, 30) inverse = model.inverse assert isinstance(inverse, projections.Pix2Sky_ConicPerspective) assert inverse.sigma == model.sigma == 2 assert_allclose(inverse.delta, model.delta) assert_allclose(inverse.delta, 30) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Pix2Sky_ConicEqualArea_inverse(): model = projections.Pix2Sky_ConicEqualArea(2, 30) inverse = model.inverse assert isinstance(inverse, projections.Sky2Pix_ConicEqualArea) assert inverse.sigma == model.sigma == 2 assert_allclose(inverse.delta, model.delta) assert_allclose(inverse.delta, 30) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Sky2Pix_ConicEqualArea_inverse(): model = projections.Sky2Pix_ConicEqualArea(2, 30) inverse = model.inverse assert isinstance(inverse, projections.Pix2Sky_ConicEqualArea) assert inverse.sigma == model.sigma == 2 assert_allclose(inverse.delta, model.delta) assert_allclose(inverse.delta, 30) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Pix2Sky_ConicEquidistant_inverse(): model = projections.Pix2Sky_ConicEquidistant(2, 30) inverse = model.inverse assert isinstance(inverse, projections.Sky2Pix_ConicEquidistant) assert inverse.sigma == model.sigma == 2 assert_allclose(inverse.delta, model.delta) assert_allclose(inverse.delta, 30) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Sky2Pix_ConicEquidistant_inverse(): model = projections.Sky2Pix_ConicEquidistant(2, 30) inverse = model.inverse assert isinstance(inverse, projections.Pix2Sky_ConicEquidistant) assert inverse.sigma == model.sigma == 2 assert_allclose(inverse.delta, model.delta) assert_allclose(inverse.delta, 30) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Pix2Sky_ConicOrthomorphic_inverse(): model = projections.Pix2Sky_ConicOrthomorphic(2, 30) inverse = model.inverse assert isinstance(inverse, projections.Sky2Pix_ConicOrthomorphic) assert inverse.sigma == model.sigma == 2 assert_allclose(inverse.delta, model.delta) assert_allclose(inverse.delta, 30) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, 
atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Sky2Pix_ConicOrthomorphic_inverse(): model = projections.Sky2Pix_ConicOrthomorphic(2, 30) inverse = model.inverse assert isinstance(inverse, projections.Pix2Sky_ConicOrthomorphic) assert inverse.sigma == model.sigma == 2 assert_allclose(inverse.delta, model.delta) assert_allclose(inverse.delta, 30) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Pix2Sky_BonneEqualArea_inverse(): model = projections.Pix2Sky_BonneEqualArea(2) inverse = model.inverse assert isinstance(inverse, projections.Sky2Pix_BonneEqualArea) assert inverse.theta1 == model.theta1 == 2 x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Sky2Pix_BonneEqualArea_inverse(): model = projections.Sky2Pix_BonneEqualArea(2) inverse = model.inverse assert isinstance(inverse, projections.Pix2Sky_BonneEqualArea) assert inverse.theta1 == model.theta1 == 2 x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Pix2Sky_Polyconic_inverse(): model = projections.Pix2Sky_Polyconic() inverse = model.inverse assert isinstance(inverse, projections.Sky2Pix_Polyconic) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Sky2Pix_Polyconic_inverse(): model = projections.Sky2Pix_Polyconic() inverse = model.inverse assert isinstance(inverse, projections.Pix2Sky_Polyconic) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Pix2Sky_TangentialSphericalCube_inverse(): model = projections.Pix2Sky_TangentialSphericalCube() inverse = model.inverse assert isinstance(inverse, projections.Sky2Pix_TangentialSphericalCube) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Sky2Pix_TangentialSphericalCube_inverse(): model = projections.Sky2Pix_TangentialSphericalCube() inverse = model.inverse assert isinstance(inverse, projections.Pix2Sky_TangentialSphericalCube) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Pix2Sky_COBEQuadSphericalCube_inverse(): model = projections.Pix2Sky_COBEQuadSphericalCube() inverse = model.inverse assert isinstance(inverse, projections.Sky2Pix_COBEQuadSphericalCube) x = np.linspace(0, 1, 100) y = 
np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-3) assert_allclose(b, y, atol=1e-3) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-3) assert_allclose(b, y, atol=1e-3) def test_Sky2Pix_COBEQuadSphericalCube_inverse(): model = projections.Sky2Pix_COBEQuadSphericalCube() inverse = model.inverse assert isinstance(inverse, projections.Pix2Sky_COBEQuadSphericalCube) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-3) assert_allclose(b, y, atol=1e-3) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-3) assert_allclose(b, y, atol=1e-3) def test_Pix2Sky_QuadSphericalCube_inverse(): model = projections.Pix2Sky_QuadSphericalCube() inverse = model.inverse assert isinstance(inverse, projections.Sky2Pix_QuadSphericalCube) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Sky2Pix_QuadSphericalCube_inverse(): model = projections.Sky2Pix_QuadSphericalCube() inverse = model.inverse assert isinstance(inverse, projections.Pix2Sky_QuadSphericalCube) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Pix2Sky_HEALPix_inverse(): model = projections.Pix2Sky_HEALPix(2, 30) inverse = model.inverse assert isinstance(inverse, projections.Sky2Pix_HEALPix) assert inverse.H == model.H == 2 assert inverse.X == model.X == 30 x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Sky2Pix_HEALPix_inverse(): model = projections.Sky2Pix_HEALPix(2, 30) inverse = model.inverse assert isinstance(inverse, projections.Pix2Sky_HEALPix) assert inverse.H == model.H == 2 assert inverse.X == model.X == 30 x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Pix2Sky_HEALPixPolar_inverse(): model = projections.Pix2Sky_HEALPixPolar() inverse = model.inverse assert isinstance(inverse, projections.Sky2Pix_HEALPixPolar) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) def test_Sky2Pix_HEALPixPolar_inverse(): model = projections.Sky2Pix_HEALPixPolar() inverse = model.inverse assert isinstance(inverse, projections.Pix2Sky_HEALPixPolar) x = np.linspace(0, 1, 100) y = np.linspace(0, 1, 100) a, b = model(*inverse(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12) a, b = inverse(*model(x, y)) assert_allclose(a, x, atol=1e-12) assert_allclose(b, y, atol=1e-12)
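# All of the inverse-consistency tests above share one pattern.  Below is a
# minimal sketch of that pattern as a reusable helper; ``_check_roundtrip`` is
# illustrative only and not part of the original suite.
def _check_roundtrip(model, atol=1e-12):
    """Check that ``model`` and ``model.inverse`` undo each other on a grid."""
    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)
    # one composition: inverse first, then forward
    a, b = model(*model.inverse(x, y))
    assert_allclose(a, x, atol=atol)
    assert_allclose(b, y, atol=atol)
    # and the opposite composition: forward first, then inverse
    a, b = model.inverse(*model(x, y))
    assert_allclose(a, x, atol=atol)
    assert_allclose(b, y, atol=atol)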
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides functions to help with testing against IRAF tasks
"""
import numpy as np

from astropy.logger import log

iraf_models_map = {1.0: "Chebyshev", 2.0: "Legendre", 3.0: "Spline3", 4.0: "Spline1"}


def get_records(fname):
    """
    Read the records of an IRAF database file into a Python list

    Parameters
    ----------
    fname : str
        name of an IRAF database file

    Returns
    -------
    A list of records
    """
    with open(fname) as f:
        dtb = f.read()
    recs = dtb.split("begin")[1:]
    records = [Record(r) for r in recs]
    return records


def get_database_string(fname):
    """
    Read an IRAF database file

    Parameters
    ----------
    fname : str
        name of an IRAF database file

    Returns
    -------
    the database file as a string
    """
    with open(fname) as f:
        dtb = f.read()
    return dtb


class Record:
    """
    A base class for all records - represents an IRAF database record

    Attributes
    ----------
    recstr : string
        the record as a string
    fields : dict
        the fields in the record
    taskname : string
        the name of the task which created the database file
    """

    def __init__(self, recstr):
        self.recstr = recstr
        self.fields = self.get_fields()
        self.taskname = self.get_task_name()

    def aslist(self):
        reclist = [entry.strip() for entry in self.recstr.split("\n")]
        # filter out empty entries (the original used list.remove inside a
        # comprehension, which mutates the list while iterating over it)
        return [entry for entry in reclist if entry]

    def get_fields(self):
        # read record fields as an array
        fields = {}
        flist = self.aslist()
        numfields = len(flist)
        for i in range(numfields):
            line = flist[i]
            if line and line[0].isalpha():
                field = line.split()
                if i + 1 < numfields:
                    if not flist[i + 1][0].isalpha():
                        fields[field[0]] = self.read_array_field(
                            flist[i : i + int(field[1]) + 1]
                        )
                    else:
                        fields[field[0]] = " ".join(s for s in field[1:])
                else:
                    fields[field[0]] = " ".join(s for s in field[1:])
            else:
                continue
        return fields

    def get_task_name(self):
        try:
            return self.fields["task"]
        except KeyError:
            return None

    def read_array_field(self, fieldlist):
        # Turn an IRAF record array field into a numpy array
        fieldline = [entry.split() for entry in fieldlist[1:]]
        # take only the first 3 columns
        # identify writes also strings at the end of some field lines
        xyz = [entry[:3] for entry in fieldline]
        try:
            farr = np.array(xyz)
        except Exception:
            log.debug(f"Could not read array field {fieldlist[0].split()[0]}")
            # re-raise so the failure is explicit; previously this fell
            # through to a NameError on ``farr`` below
            raise
        return farr.astype(np.float64)


class IdentifyRecord(Record):
    """
    Represents a database record for the onedspec.identify task

    Attributes
    ----------
    x: array
        the X values of the identified features
        this represents values on axis1 (image rows)
    y: int
        the Y values of the identified features
        (image columns)
    z: array
        the values which X maps into
    modelname: string
        the function used to fit the data
    nterms: int
        degree of the polynomial which was fit to the data
        in IRAF this is the number of coefficients, not the order
    mrange: list
        the range of the data
    coeff: array
        function (modelname) coefficients
    """

    def __init__(self, recstr):
        super().__init__(recstr)
        self._flatcoeff = self.fields["coefficients"].flatten()
        self.x = self.fields["features"][:, 0]
        self.y = self.get_ydata()
        self.z = self.fields["features"][:, 1]
        self.modelname = self.get_model_name()
        self.nterms = self.get_nterms()
        self.mrange = self.get_range()
        self.coeff = self.get_coeff()

    def get_model_name(self):
        return iraf_models_map[self._flatcoeff[0]]

    def get_nterms(self):
        return self._flatcoeff[1]

    def get_range(self):
        low = self._flatcoeff[2]
        high = self._flatcoeff[3]
        return [low, high]

    def get_coeff(self):
        return self._flatcoeff[4:]

    def get_ydata(self):
        image = self.fields["image"]
        left = image.find("[") + 1
        right = image.find("]")
        section = image[left:right]
        if "," in section:
            yind = image.find(",") + 1
            return int(image[yind:-1])
        else:
            return int(section)


class FitcoordsRecord(Record):
    """
    Represents a database record for the longslit.fitcoords task

    Attributes
    ----------
    modelname: string
        the function used to fit the data
    xorder: int
        number of terms in x
    yorder: int
        number of terms in y
    xbounds: list
        data range in x
    ybounds: list
        data range in y
    coeff: array
        function coefficients
    """

    def __init__(self, recstr):
        super().__init__(recstr)
        self._surface = self.fields["surface"].flatten()
        self.modelname = iraf_models_map[self._surface[0]]
        self.xorder = self._surface[1]
        self.yorder = self._surface[2]
        self.xbounds = [self._surface[4], self._surface[5]]
        self.ybounds = [self._surface[6], self._surface[7]]
        self.coeff = self.get_coeff()

    def get_coeff(self):
        return self._surface[8:]


class IDB:
    """
    Base class for an IRAF identify database

    Attributes
    ----------
    records: list
        a list of all `IdentifyRecord` in the database
    numrecords: int
        number of records
    """

    def __init__(self, dtbstr):
        self.records = [IdentifyRecord(rstr) for rstr in self.aslist(dtbstr)]
        self.numrecords = len(self.records)

    def aslist(self, dtb):
        # return a list of records
        # if the first one is a comment remove it from the list
        rl = dtb.split("begin")
        try:
            rl0 = rl[0].split("\n")
        except Exception:
            return rl
        if len(rl0) == 2 and rl0[0].startswith("#") and not rl0[1].strip():
            return rl[1:]
        else:
            return rl


class ReidentifyRecord(IDB):
    """
    Represents a database record for the onedspec.reidentify task
    """

    def __init__(self, databasestr):
        super().__init__(databasestr)
        self.x = np.array([r.x for r in self.records])
        self.y = self.get_ydata()
        self.z = np.array([r.z for r in self.records])

    def get_ydata(self):
        y = np.ones(self.x.shape)
        y = y * np.array([r.y for r in self.records])[:, np.newaxis]
        return y
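if __name__ == "__main__":
    # Minimal usage sketch (added for illustration): parse an IRAF identify
    # database file passed on the command line and list its records.  The
    # argument handling here is an assumption, not part of the original module.
    import sys

    for rec in get_records(sys.argv[1]):
        print(rec.taskname, sorted(rec.fields))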
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Here are all the test parameters and values for the each `~astropy.modeling.FittableModel` defined. There is a dictionary for 1D and a dictionary for 2D models. Explanation of keywords of the dictionaries: "parameters" : list or dict Model parameters, the model is tested with. Make sure you keep the right order. For polynomials you can also use a dict to specify the coefficients. See examples below. "x_values" : list x values where the model is evaluated. "y_values" : list Reference y values for the in x_values given positions. "z_values" : list Reference z values for the in x_values and y_values given positions. (2D model option) "x_lim" : list x test range for the model fitter. Depending on the model this can differ e.g. the PowerLaw model should be tested over a few magnitudes. "y_lim" : list y test range for the model fitter. Depending on the model this can differ e.g. the PowerLaw model should be tested over a few magnitudes. (2D model option) "log_fit" : bool PowerLaw models should be tested over a few magnitudes. So log_fit should be true. "requires_scipy" : bool If a model requires scipy (Bessel functions etc.) set this flag. "integral" : float Approximate value of the integral in the range x_lim (and y_lim). "deriv_parameters" : list If given the test of the derivative will use these parameters to create a model (optional) "deriv_initial" : list If given the test of the derivative will use these parameters as initial values for the fit (optional) """ import numpy as np from astropy.modeling.functional_models import ( AiryDisk2D, ArcCosine1D, ArcSine1D, ArcTangent1D, Box1D, Box2D, Const1D, Const2D, Cosine1D, Disk2D, Exponential1D, Gaussian1D, Gaussian2D, KingProjectedAnalytic1D, Linear1D, Logarithmic1D, Lorentz1D, Moffat1D, Moffat2D, Planar2D, RickerWavelet1D, RickerWavelet2D, Ring2D, Sersic1D, Sersic2D, Sine1D, Tangent1D, Trapezoid1D, TrapezoidDisk2D, Voigt1D, ) from astropy.modeling.physical_models import Drude1D, Plummer1D from astropy.modeling.polynomial import Polynomial1D, Polynomial2D from astropy.modeling.powerlaws import ( BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D, LogParabola1D, PowerLaw1D, Schechter1D, SmoothlyBrokenPowerLaw1D, ) # 1D Models models_1D = { Gaussian1D: { "parameters": [1, 0, 1], "x_values": [0, np.sqrt(2), -np.sqrt(2)], "y_values": [1.0, 0.367879, 0.367879], "x_lim": [-10, 10], "integral": np.sqrt(2 * np.pi), "bbox_peak": True, }, Sine1D: { "parameters": [1, 0.1, 0], "x_values": [0, 2.5], "y_values": [0, 1], "x_lim": [-10, 10], "integral": 0, }, Cosine1D: { "parameters": [1, 0.1, 0], "x_values": [0, 2.5], "y_values": [1, 0], "x_lim": [-10, 10], "integral": 0, }, Tangent1D: { "parameters": [1, 0.1, 0], "x_values": [0, 1.25], "y_values": [0, 1], "x_lim": [-10, 10], "integral": 0, }, ArcSine1D: { "parameters": [1, 0.1, 0], "x_values": [0, 1], "y_values": [0, 2.5], "x_lim": [-0.5, 0.5], "integral": 0, }, ArcCosine1D: { "parameters": [1, 0.1, 0], "x_values": [1, 0], "y_values": [0, 2.5], "x_lim": [-0.5, 0.5], "integral": 0, }, ArcTangent1D: { "parameters": [1, 0.1, 0], "x_values": [0, 1], "y_values": [0, 1.25], "x_lim": [-10, 10], "integral": 0, }, Box1D: { "parameters": [1, 0, 10], "x_values": [-5, 5, 0, -10, 10], "y_values": [1, 1, 1, 0, 0], "x_lim": [-10, 10], "integral": 10, "bbox_peak": True, }, Linear1D: { "parameters": [1, 0], "x_values": [0, np.pi, 42, -1], "y_values": [0, np.pi, 42, -1], "x_lim": [-10, 10], "integral": 0, }, Lorentz1D: { "parameters": [1, 0, 1], "x_values": [0, -1, 
1, 0.5, -0.5], "y_values": [1.0, 0.2, 0.2, 0.5, 0.5], "x_lim": [-10, 10], "integral": 1, "bbox_peak": True, }, RickerWavelet1D: { "parameters": [1, 0, 1], "x_values": [0, 1, -1, 3, -3], "y_values": [1.0, 0.0, 0.0, -0.088872, -0.088872], "x_lim": [-20, 20], "integral": 0, "bbox_peak": True, }, Trapezoid1D: { "parameters": [1, 0, 2, 1], "x_values": [0, 1, -1, 1.5, -1.5, 2, 2], "y_values": [1, 1, 1, 0.5, 0.5, 0, 0], "x_lim": [-10, 10], "integral": 3, "bbox_peak": True, }, Const1D: { "parameters": [1], "x_values": [-1, 1, np.pi, -42.0, 0], "y_values": [1, 1, 1, 1, 1], "x_lim": [-10, 10], "integral": 20, }, Moffat1D: { "parameters": [1, 0, 1, 2], "x_values": [0, 1, -1, 3, -3], "y_values": [1.0, 0.25, 0.25, 0.01, 0.01], "x_lim": [-10, 10], "integral": 1, "deriv_parameters": [23.4, 1.2, 2.1, 2.3], "deriv_initial": [10, 1, 1, 1], }, PowerLaw1D: { "parameters": [1, 1, 2], "constraints": {"fixed": {"x_0": True}}, "x_values": [1, 10, 100], "y_values": [1.0, 0.01, 0.0001], "x_lim": [1, 10], "log_fit": True, "integral": 0.99, }, BrokenPowerLaw1D: { "parameters": [1, 1, 2, 3], "constraints": {"fixed": {"x_break": True}}, "x_values": [0.1, 1, 10, 100], "y_values": [1e2, 1.0, 1e-3, 1e-6], "x_lim": [0.1, 100], "log_fit": True, }, SmoothlyBrokenPowerLaw1D: { "parameters": [1, 1, -2, 2, 0.5], "constraints": {"fixed": {"x_break": True, "delta": True}}, "x_values": [0.01, 1, 100], "y_values": [3.99920012e-04, 1.0, 3.99920012e-04], "x_lim": [0.01, 100], "log_fit": True, }, ExponentialCutoffPowerLaw1D: { "parameters": [1, 1, 2, 3], "constraints": {"fixed": {"x_0": True}}, "x_values": [0.1, 1, 10, 100], "y_values": [9.67216100e01, 7.16531311e-01, 3.56739933e-04, 3.33823780e-19], "x_lim": [0.01, 100], "log_fit": True, }, Schechter1D: { "parameters": [1.0, -20.0, -1.0], "x_values": [-25.0, -23.0, -21.0, -19.0, -17.0], "y_values": [ 3.42631659e-44, 1.20551329e-07, 7.47097466e-02, 6.18557294e-01, 8.64716111e-01, ], "x_lim": [-25, -17.0], }, LogParabola1D: { "parameters": [1, 2, 3, 0.1], "constraints": {"fixed": {"x_0": True}}, "x_values": [0.1, 1, 10, 100], "y_values": [3.26089063e03, 7.62472488e00, 6.17440488e-03, 1.73160572e-06], "x_lim": [0.1, 100], "log_fit": True, }, Polynomial1D: { "parameters": {"degree": 2, "c0": 1.0, "c1": 1.0, "c2": 1.0}, "x_values": [1, 10, 100], "y_values": [3, 111, 10101], "x_lim": [-3, 3], }, Sersic1D: { "parameters": [1, 20, 4], "x_values": [0.1, 1, 10, 100], "y_values": [2.78629391e02, 5.69791430e01, 3.38788244e00, 2.23941982e-02], "requires_scipy": True, "x_lim": [0, 10], "log_fit": True, }, Voigt1D: { "parameters": [0, 1, 0.5, 0.9], "x_values": [0, 0.2, 0.5, 1, 2, 4, 8, 20], "y_values": [ 0.52092360, 0.479697445, 0.317550374, 0.0988079347, 1.73876624e-2, 4.00173216e-3, 9.82351731e-4, 1.56396993e-4, ], "x_lim": [-3, 3], }, KingProjectedAnalytic1D: { "parameters": [1, 1, 2], "x_values": [0, 0.1, 0.5, 0.8], "y_values": [0.30557281, 0.30011069, 0.2, 0.1113258], "x_lim": [0, 10], "y_lim": [0, 10], "bbox_peak": True, }, Drude1D: { "parameters": [1.0, 8.0, 1.0], "x_values": [7.0, 8.0, 9.0, 10.0], "y_values": [0.17883212, 1.0, 0.21891892, 0.07163324], "x_lim": [1.0, 20.0], "y_lim": [0.0, 10.0], "bbox_peak": True, }, Plummer1D: { "parameters": [10.0, 0.5], "x_values": [1.0000e-03, 2.5005e00, 5.0000e00], "y_values": [1.90984022e01, 5.53541843e-03, 1.86293603e-04], "x_lim": [0.001, 100], }, Exponential1D: { "parameters": [1, 1], "x_values": [0, 0.5, 1], "y_values": [1, np.sqrt(np.e), np.e], "x_lim": [0, 2], "integral": (np.e**2 - 1.0), }, Logarithmic1D: { "parameters": [1, 1], "x_values": [1, 
np.e, np.e**2], "y_values": [0, 1, 2], "x_lim": [1, np.e**2], "integral": (np.e**2 + 1), }, } # 2D Models models_2D = { Gaussian2D: { "parameters": [1, 0, 0, 1, 1], "constraints": {"fixed": {"theta": True}}, "x_values": [0, np.sqrt(2), -np.sqrt(2)], "y_values": [0, np.sqrt(2), -np.sqrt(2)], "z_values": [1, 1.0 / np.exp(1) ** 2, 1.0 / np.exp(1) ** 2], "x_lim": [-10, 10], "y_lim": [-10, 10], "integral": 2 * np.pi, "deriv_parameters": [137.0, 5.1, 5.4, 1.5, 2.0, np.pi / 4], "deriv_initial": [10, 5, 5, 4, 4, 0.5], "bbox_peak": True, }, Const2D: { "parameters": [1], "x_values": [-1, 1, np.pi, -42.0, 0], "y_values": [0, 1, 42, np.pi, -1], "z_values": [1, 1, 1, 1, 1], "x_lim": [-10, 10], "y_lim": [-10, 10], "integral": 400, }, Box2D: { "parameters": [1, 0, 0, 10, 10], "x_values": [-5, 5, -5, 5, 0, -10, 10], "y_values": [-5, 5, 0, 0, 0, -10, 10], "z_values": [1, 1, 1, 1, 1, 0, 0], "x_lim": [-10, 10], "y_lim": [-10, 10], "integral": 100, "bbox_peak": True, }, RickerWavelet2D: { "parameters": [1, 0, 0, 1], "x_values": [0, 0, 0, 0, 0, 1, -1, 3, -3], "y_values": [0, 1, -1, 3, -3, 0, 0, 0, 0], "z_values": [ 1.0, 0.303265, 0.303265, -0.038881, -0.038881, 0.303265, 0.303265, -0.038881, -0.038881, ], "x_lim": [-10, 11], "y_lim": [-10, 11], "integral": 0, }, TrapezoidDisk2D: { "parameters": [1, 0, 0, 1, 1], "x_values": [0, 0.5, 0, 1.5], "y_values": [0, 0.5, 1.5, 0], "z_values": [1, 1, 0.5, 0.5], "x_lim": [-3, 3], "y_lim": [-3, 3], "bbox_peak": True, }, AiryDisk2D: { "parameters": [7, 0, 0, 10], "x_values": [0, 1, -1, -0.5, -0.5], "y_values": [0, -1, 0.5, 0.5, -0.5], "z_values": [7.0, 6.50158267, 6.68490643, 6.87251093, 6.87251093], "x_lim": [-10, 10], "y_lim": [-10, 10], "requires_scipy": True, }, Moffat2D: { "parameters": [1, 0, 0, 1, 2], "x_values": [0, 1, -1, 3, -3], "y_values": [0, -1, 3, 1, -3], "z_values": [1.0, 0.111111, 0.008264, 0.008264, 0.00277], "x_lim": [-3, 3], "y_lim": [-3, 3], }, Polynomial2D: { "parameters": {"degree": 1, "c0_0": 1.0, "c1_0": 1.0, "c0_1": 1.0}, "x_values": [1, 2, 3], "y_values": [1, 3, 2], "z_values": [3, 6, 6], "x_lim": [1, 100], "y_lim": [1, 100], }, Disk2D: { "parameters": [1, 0, 0, 5], "x_values": [-5, 5, -5, 5, 0, -10, 10], "y_values": [-5, 5, 0, 0, 0, -10, 10], "z_values": [0, 0, 1, 1, 1, 0, 0], "x_lim": [-10, 10], "y_lim": [-10, 10], "integral": np.pi * 5**2, "bbox_peak": True, }, Ring2D: { "parameters": [1, 0, 0, 5, 5], "x_values": [-5, 5, -5, 5, 0, -10, 10], "y_values": [-5, 5, 0, 0, 0, -10, 10], "z_values": [1, 1, 1, 1, 0, 0, 0], "x_lim": [-10, 10], "y_lim": [-10, 10], "integral": np.pi * (10**2 - 5**2), "bbox_peak": True, }, Sersic2D: { "parameters": [1, 25, 4, 50, 50, 0.5, -1], "x_values": [0.0, 1, 10, 100], "y_values": [1, 100, 0.0, 10], "z_values": [1.686398e-02, 9.095221e-02, 2.341879e-02, 9.419231e-02], "requires_scipy": True, "x_lim": [1, 1e10], "y_lim": [1, 1e10], }, Planar2D: { "parameters": [1, 1, 0], "x_values": [0, np.pi, 42, -1], "y_values": [np.pi, 0, -1, 42], "z_values": [np.pi, np.pi, 41, 41], "x_lim": [-10, 10], "y_lim": [-10, 10], "integral": 0, }, }
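# A sketch (added for illustration) of how these dictionaries can be consumed;
# the real drivers live in the test modules that import this file, and
# ``_check_model_1d`` is a hypothetical name.  Specs flagged "requires_scipy"
# need scipy installed before evaluation.
def _check_model_1d(model_class, spec, rtol=1e-5):
    """Evaluate ``model_class`` at the reference x_values and compare."""
    params = spec["parameters"]
    if isinstance(params, dict):
        # polynomial-style specs pass degree and coefficients by keyword
        model = model_class(**params)
    else:
        model = model_class(*params)
    np.testing.assert_allclose(
        model(np.array(spec["x_values"])), spec["y_values"], rtol=rtol
    )


# e.g. _check_model_1d(Gaussian1D, models_1D[Gaussian1D])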
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Module to test fitting routines """ # pylint: disable=invalid-name import os.path import unittest.mock as mk from importlib.metadata import EntryPoint from itertools import combinations from unittest import mock import numpy as np import pytest from numpy import linalg from numpy.testing import assert_allclose, assert_almost_equal, assert_equal from astropy.modeling import models from astropy.modeling.core import Fittable2DModel, Parameter from astropy.modeling.fitting import ( DogBoxLSQFitter, Fitter, FittingWithOutlierRemoval, JointFitter, LevMarLSQFitter, LinearLSQFitter, LMLSQFitter, NonFiniteValueError, SimplexLSQFitter, SLSQPLSQFitter, TRFLSQFitter, _NLLSQFitter, populate_entry_points, ) from astropy.modeling.optimizers import Optimization from astropy.stats import sigma_clip from astropy.utils import NumpyRNGContext from astropy.utils.compat.optional_deps import HAS_SCIPY from astropy.utils.data import get_pkg_data_filename from astropy.utils.exceptions import AstropyUserWarning from . import irafutil if HAS_SCIPY: from scipy import optimize fitters = [SimplexLSQFitter, SLSQPLSQFitter] non_linear_fitters = [LevMarLSQFitter, TRFLSQFitter, LMLSQFitter, DogBoxLSQFitter] _RANDOM_SEED = 0x1337 class TestPolynomial2D: """Tests for 2D polynomial fitting.""" def setup_class(self): self.model = models.Polynomial2D(2) self.y, self.x = np.mgrid[:5, :5] def poly2(x, y): return 1 + 2 * x + 3 * x**2 + 4 * y + 5 * y**2 + 6 * x * y self.z = poly2(self.x, self.y) def test_poly2D_fitting(self): fitter = LinearLSQFitter() v = self.model.fit_deriv(x=self.x, y=self.y) p = linalg.lstsq(v, self.z.flatten(), rcond=-1)[0] new_model = fitter(self.model, self.x, self.y, self.z) assert_allclose(new_model.parameters, p) def test_eval(self): fitter = LinearLSQFitter() new_model = fitter(self.model, self.x, self.y, self.z) assert_allclose(new_model(self.x, self.y), self.z) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.parametrize("fitter", non_linear_fitters) def test_nonlinear_fitting(self, fitter): fitter = fitter() self.model.parameters = [0.6, 1.8, 2.9, 3.7, 4.9, 6.7] with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"): new_model = fitter(self.model, self.x, self.y, self.z) assert_allclose(new_model.parameters, [1, 2, 3, 4, 5, 6]) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_compare_nonlinear_fitting(self): self.model.parameters = [0.6, 1.8, 2.9, 3.7, 4.9, 6.7] fit_models = [] for fitter in non_linear_fitters: fitter = fitter() with pytest.warns( AstropyUserWarning, match=r"Model is linear in parameters" ): fit_models.append(fitter(self.model, self.x, self.y, self.z)) for pair in combinations(fit_models, 2): assert_allclose(pair[0].parameters, pair[1].parameters) class TestICheb2D: """ Tests 2D Chebyshev polynomial fitting Create a 2D polynomial (z) using Polynomial2DModel and default coefficients Fit z using a ICheb2D model Evaluate the ICheb2D polynomial and compare with the initial z """ def setup_class(self): self.pmodel = models.Polynomial2D(2) self.y, self.x = np.mgrid[:5, :5] self.z = self.pmodel(self.x, self.y) self.cheb2 = models.Chebyshev2D(2, 2) self.fitter = LinearLSQFitter() def test_default_params(self): self.cheb2.parameters = np.arange(9) p = np.array( [1344.0, 1772.0, 400.0, 1860.0, 2448.0, 552.0, 432.0, 568.0, 128.0] ) z = self.cheb2(self.x, self.y) model = self.fitter(self.cheb2, self.x, self.y, z) assert_almost_equal(model.parameters, p) def 
test_poly2D_cheb2D(self): model = self.fitter(self.cheb2, self.x, self.y, self.z) z1 = model(self.x, self.y) assert_almost_equal(self.z, z1) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.parametrize("fitter", non_linear_fitters) def test_chebyshev2D_nonlinear_fitting(self, fitter): fitter = fitter() cheb2d = models.Chebyshev2D(2, 2) cheb2d.parameters = np.arange(9) z = cheb2d(self.x, self.y) cheb2d.parameters = [0.1, 0.6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9] with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"): model = fitter(cheb2d, self.x, self.y, z) assert_allclose(model.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8], atol=10**-9) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.parametrize("fitter", non_linear_fitters) def test_chebyshev2D_nonlinear_fitting_with_weights(self, fitter): fitter = fitter() cheb2d = models.Chebyshev2D(2, 2) cheb2d.parameters = np.arange(9) z = cheb2d(self.x, self.y) cheb2d.parameters = [0.1, 0.6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9] weights = np.ones_like(self.y) with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"): model = fitter(cheb2d, self.x, self.y, z, weights=weights) assert_allclose(model.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8], atol=10**-9) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") class TestJointFitter: """ Tests the joint fitting routine using 2 gaussian models """ def setup_class(self): """ Create 2 gaussian models and some data with noise. Create a fitter for the two models keeping the amplitude parameter common for the two models. """ self.g1 = models.Gaussian1D(10, mean=14.9, stddev=0.3) self.g2 = models.Gaussian1D(10, mean=13, stddev=0.4) self.jf = JointFitter( [self.g1, self.g2], {self.g1: ["amplitude"], self.g2: ["amplitude"]}, [9.8] ) self.x = np.arange(10, 20, 0.1) y1 = self.g1(self.x) y2 = self.g2(self.x) with NumpyRNGContext(_RANDOM_SEED): n = np.random.randn(100) self.ny1 = y1 + 2 * n self.ny2 = y2 + 2 * n self.jf(self.x, self.ny1, self.x, self.ny2) def test_joint_parameter(self): """ Tests that the amplitude of the two models is the same """ assert_allclose(self.jf.fitparams[0], self.g1.parameters[0]) assert_allclose(self.jf.fitparams[0], self.g2.parameters[0]) def test_joint_fitter(self): """ Tests the fitting routine with similar procedure. Compares the fitted parameters. 
""" p1 = [14.9, 0.3] p2 = [13, 0.4] A = 9.8 p = np.r_[A, p1, p2] def model(A, p, x): return A * np.exp(-0.5 / p[1] ** 2 * (x - p[0]) ** 2) def errfunc(p, x1, y1, x2, y2): return np.ravel( np.r_[model(p[0], p[1:3], x1) - y1, model(p[0], p[3:], x2) - y2] ) coeff, _ = optimize.leastsq( errfunc, p, args=(self.x, self.ny1, self.x, self.ny2) ) assert_allclose(coeff, self.jf.fitparams, rtol=10 ** (-2)) class TestLinearLSQFitter: def test_compound_model_raises_error(self): """Test that if an user tries to use a compound model, raises an error""" MESSAGE = r"Model must be simple, not compound" with pytest.raises(ValueError, match=MESSAGE): init_model1 = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2) init_model2 = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2) init_model_comp = init_model1 + init_model2 x = np.arange(10) y = init_model_comp(x, model_set_axis=False) fitter = LinearLSQFitter() fitter(init_model_comp, x, y) def test_chebyshev1D(self): """Tests fitting a 1D Chebyshev polynomial to some real world data.""" test_file = get_pkg_data_filename(os.path.join("data", "idcompspec.fits")) with open(test_file) as f: lines = f.read() reclist = lines.split("begin") record = irafutil.IdentifyRecord(reclist[1]) coeffs = record.coeff order = int(record.fields["order"]) initial_model = models.Chebyshev1D(order - 1, domain=record.get_range()) fitter = LinearLSQFitter() fitted_model = fitter(initial_model, record.x, record.z) assert_allclose(fitted_model.parameters, np.array(coeffs), rtol=10e-2) def test_linear_fit_model_set(self): """Tests fitting multiple models simultaneously.""" init_model = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2) x = np.arange(10) y_expected = init_model(x, model_set_axis=False) assert y_expected.shape == (2, 10) # Add a bit of random noise with NumpyRNGContext(_RANDOM_SEED): y = y_expected + np.random.normal(0, 0.01, size=y_expected.shape) fitter = LinearLSQFitter() fitted_model = fitter(init_model, x, y) assert_allclose(fitted_model(x, model_set_axis=False), y_expected, rtol=1e-1) def test_linear_fit_2d_model_set(self): """Tests fitted multiple 2-D models simultaneously.""" init_model = models.Polynomial2D(degree=2, c0_0=[1, 1], n_models=2) x = np.arange(10) y = np.arange(10) z_expected = init_model(x, y, model_set_axis=False) assert z_expected.shape == (2, 10) # Add a bit of random noise with NumpyRNGContext(_RANDOM_SEED): z = z_expected + np.random.normal(0, 0.01, size=z_expected.shape) fitter = LinearLSQFitter() fitted_model = fitter(init_model, x, y, z) assert_allclose(fitted_model(x, y, model_set_axis=False), z_expected, rtol=1e-1) def test_linear_fit_fixed_parameter(self): """ Tests fitting a polynomial model with a fixed parameter (issue #6135). """ init_model = models.Polynomial1D(degree=2, c1=1) init_model.c1.fixed = True x = np.arange(10) y = 2 + x + 0.5 * x * x fitter = LinearLSQFitter() fitted_model = fitter(init_model, x, y) assert_allclose(fitted_model.parameters, [2.0, 1.0, 0.5], atol=1e-14) def test_linear_fit_model_set_fixed_parameter(self): """ Tests fitting a polynomial model set with a fixed parameter (#6135). 
""" init_model = models.Polynomial1D(degree=2, c1=[1, -2], n_models=2) init_model.c1.fixed = True x = np.arange(10) yy = np.array([2 + x + 0.5 * x * x, -2 * x]) fitter = LinearLSQFitter() fitted_model = fitter(init_model, x, yy) assert_allclose(fitted_model.c0, [2.0, 0.0], atol=1e-14) assert_allclose(fitted_model.c1, [1.0, -2.0], atol=1e-14) assert_allclose(fitted_model.c2, [0.5, 0.0], atol=1e-14) def test_linear_fit_2d_model_set_fixed_parameters(self): """ Tests fitting a 2d polynomial model set with fixed parameters (#6135). """ init_model = models.Polynomial2D( degree=2, c1_0=[1, 2], c0_1=[-0.5, 1], n_models=2, fixed={"c1_0": True, "c0_1": True}, ) x, y = np.mgrid[0:5, 0:5] zz = np.array([1 + x - 0.5 * y + 0.1 * x * x, 2 * x + y - 0.2 * y * y]) fitter = LinearLSQFitter() fitted_model = fitter(init_model, x, y, zz) assert_allclose(fitted_model(x, y, model_set_axis=False), zz, atol=1e-14) def test_linear_fit_model_set_masked_values(self): """ Tests model set fitting with masked value(s) (#4824, #6819). """ # NB. For single models, there is an equivalent doctest. init_model = models.Polynomial1D(degree=1, n_models=2) x = np.arange(10) y = np.ma.masked_array([2 * x + 1, x - 2], mask=np.zeros_like([x, x])) y[0, 7] = 100.0 # throw off fit coefficients if unmasked y.mask[0, 7] = True y[1, 1:3] = -100.0 y.mask[1, 1:3] = True fitter = LinearLSQFitter() fitted_model = fitter(init_model, x, y) assert_allclose(fitted_model.c0, [1.0, -2.0], atol=1e-14) assert_allclose(fitted_model.c1, [2.0, 1.0], atol=1e-14) def test_linear_fit_2d_model_set_masked_values(self): """ Tests 2D model set fitting with masked value(s) (#4824, #6819). """ init_model = models.Polynomial2D(1, n_models=2) x, y = np.mgrid[0:5, 0:5] z = np.ma.masked_array( [2 * x + 3 * y + 1, x - 0.5 * y - 2], mask=np.zeros_like([x, x]) ) z[0, 3, 1] = -1000.0 # throw off fit coefficients if unmasked z.mask[0, 3, 1] = True fitter = LinearLSQFitter() fitted_model = fitter(init_model, x, y, z) assert_allclose(fitted_model.c0_0, [1.0, -2.0], atol=1e-14) assert_allclose(fitted_model.c1_0, [2.0, 1.0], atol=1e-14) assert_allclose(fitted_model.c0_1, [3.0, -0.5], atol=1e-14) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") class TestNonLinearFitters: """Tests non-linear least squares fitting and the SLSQP algorithm.""" def setup_class(self): self.initial_values = [100, 5, 1] self.xdata = np.arange(0, 10, 0.1) sigma = 4.0 * np.ones_like(self.xdata) with NumpyRNGContext(_RANDOM_SEED): yerror = np.random.normal(0, sigma) def func(p, x): return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2) self.ydata = func(self.initial_values, self.xdata) + yerror self.gauss = models.Gaussian1D(100, 5, stddev=1) @pytest.mark.parametrize("fitter0", non_linear_fitters) @pytest.mark.parametrize("fitter1", non_linear_fitters) def test_estimated_vs_analytic_deriv(self, fitter0, fitter1): """ Runs `LevMarLSQFitter` and `TRFLSQFitter` with estimated and analytic derivatives of a `Gaussian1D`. """ fitter0 = fitter0() model = fitter0(self.gauss, self.xdata, self.ydata) g1e = models.Gaussian1D(100, 5.0, stddev=1) fitter1 = fitter1() emodel = fitter1(g1e, self.xdata, self.ydata, estimate_jacobian=True) assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3)) @pytest.mark.parametrize("fitter0", non_linear_fitters) @pytest.mark.parametrize("fitter1", non_linear_fitters) def test_estimated_vs_analytic_deriv_with_weights(self, fitter0, fitter1): """ Runs `LevMarLSQFitter` and `TRFLSQFitter` with estimated and analytic derivatives of a `Gaussian1D`. 
""" weights = 1.0 / (self.ydata / 10.0) fitter0 = fitter0() model = fitter0(self.gauss, self.xdata, self.ydata, weights=weights) g1e = models.Gaussian1D(100, 5.0, stddev=1) fitter1 = fitter1() emodel = fitter1( g1e, self.xdata, self.ydata, weights=weights, estimate_jacobian=True ) assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3)) @pytest.mark.parametrize("fitter", non_linear_fitters) def test_with_optimize(self, fitter): """ Tests results from `LevMarLSQFitter` and `TRFLSQFitter` against `scipy.optimize.leastsq`. """ fitter = fitter() model = fitter(self.gauss, self.xdata, self.ydata, estimate_jacobian=True) def func(p, x): return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2) def errfunc(p, x, y): return func(p, x) - y result = optimize.leastsq( errfunc, self.initial_values, args=(self.xdata, self.ydata) ) assert_allclose(model.parameters, result[0], rtol=10 ** (-3)) @pytest.mark.parametrize("fitter", non_linear_fitters) def test_with_weights(self, fitter): """ Tests results from `LevMarLSQFitter` and `TRFLSQFitter` with weights. """ fitter = fitter() # part 1: weights are equal to 1 model = fitter(self.gauss, self.xdata, self.ydata, estimate_jacobian=True) withw = fitter( self.gauss, self.xdata, self.ydata, estimate_jacobian=True, weights=np.ones_like(self.xdata), ) assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4)) # part 2: weights are 0 or 1 (effectively, they are a mask) weights = np.zeros_like(self.xdata) weights[::2] = 1.0 mask = weights >= 1.0 model = fitter( self.gauss, self.xdata[mask], self.ydata[mask], estimate_jacobian=True ) withw = fitter( self.gauss, self.xdata, self.ydata, estimate_jacobian=True, weights=weights ) assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4)) @pytest.mark.filterwarnings(r"ignore:.* Maximum number of iterations reached") @pytest.mark.filterwarnings( r"ignore:Values in x were outside bounds during a minimize step, " r"clipping to bounds" ) @pytest.mark.parametrize("fitter_class", fitters) @pytest.mark.parametrize("fitter", non_linear_fitters) def test_fitter_against_LevMar(self, fitter_class, fitter): """ Tests results from non-linear fitters against `LevMarLSQFitter` and `TRFLSQFitter` """ fitter = fitter() fitter_cls = fitter_class() # This emits a warning from fitter that we need to ignore with # pytest.mark.filterwarnings above. new_model = fitter_cls(self.gauss, self.xdata, self.ydata) model = fitter(self.gauss, self.xdata, self.ydata) assert_allclose(model.parameters, new_model.parameters, rtol=10 ** (-4)) @pytest.mark.filterwarnings( r"ignore:Values in x were outside bounds during a minimize step, " r"clipping to bounds" ) @pytest.mark.parametrize("fitter", non_linear_fitters) def test_LSQ_SLSQP_with_constraints(self, fitter): """ Runs `LevMarLSQFitter`/`TRFLSQFitter` and `SLSQPLSQFitter` on a model with constraints. """ fitter = fitter() g1 = models.Gaussian1D(100, 5, stddev=1) g1.mean.fixed = True fslsqp = SLSQPLSQFitter() slsqp_model = fslsqp(g1, self.xdata, self.ydata) model = fitter(g1, self.xdata, self.ydata) assert_allclose(model.parameters, slsqp_model.parameters, rtol=10 ** (-4)) @pytest.mark.parametrize("fitter", non_linear_fitters) def test_non_linear_lsq_fitter_with_weights(self, fitter): """ Tests that issue #11581 has been solved. 
""" fitter = fitter() np.random.seed(42) norder = 2 fitter2 = LinearLSQFitter() model = models.Polynomial1D(norder) npts = 10000 c = [2.0, -10.0, 7.0] tw = np.random.uniform(0.0, 10.0, npts) tx = np.random.uniform(0.0, 10.0, npts) ty = c[0] + c[1] * tx + c[2] * (tx**2) ty += np.random.normal(0.0, 1.5, npts) with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"): tf1 = fitter(model, tx, ty, weights=tw) tf2 = fitter2(model, tx, ty, weights=tw) assert_allclose(tf1.parameters, tf2.parameters, atol=10 ** (-16)) assert_allclose(tf1.parameters, c, rtol=10 ** (-2), atol=10 ** (-2)) model = models.Gaussian1D() if isinstance(fitter, TRFLSQFitter) or isinstance(fitter, LMLSQFitter): with pytest.warns( AstropyUserWarning, match=r"The fit may be unsuccessful; *." ): fitter(model, tx, ty, weights=tw) else: fitter(model, tx, ty, weights=tw) model = models.Polynomial2D(norder) nxpts = 100 nypts = 150 npts = nxpts * nypts c = [1.0, 4.0, 7.0, -8.0, -9.0, -3.0] tw = np.random.uniform(0.0, 10.0, npts).reshape(nxpts, nypts) tx = np.random.uniform(0.0, 10.0, npts).reshape(nxpts, nypts) ty = np.random.uniform(0.0, 10.0, npts).reshape(nxpts, nypts) tz = ( c[0] + c[1] * tx + c[2] * (tx**2) + c[3] * ty + c[4] * (ty**2) + c[5] * tx * ty ) tz += np.random.normal(0.0, 1.5, npts).reshape(nxpts, nypts) with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"): tf1 = fitter(model, tx, ty, tz, weights=tw) tf2 = fitter2(model, tx, ty, tz, weights=tw) assert_allclose(tf1.parameters, tf2.parameters, atol=10 ** (-16)) assert_allclose(tf1.parameters, c, rtol=10 ** (-2), atol=10 ** (-2)) def test_simplex_lsq_fitter(self): """A basic test for the `SimplexLSQ` fitter.""" class Rosenbrock(Fittable2DModel): a = Parameter() b = Parameter() @staticmethod def evaluate(x, y, a, b): return (a - x) ** 2 + b * (y - x**2) ** 2 x = y = np.linspace(-3.0, 3.0, 100) with NumpyRNGContext(_RANDOM_SEED): z = Rosenbrock.evaluate(x, y, 1.0, 100.0) z += np.random.normal(0.0, 0.1, size=z.shape) fitter = SimplexLSQFitter() r_i = Rosenbrock(1, 100) r_f = fitter(r_i, x, y, z) assert_allclose(r_f.parameters, [1.0, 100.0], rtol=1e-2) @pytest.mark.parametrize("fitter", non_linear_fitters) def test_param_cov(self, fitter): """ Tests that the 'param_cov' fit_info entry gets the right answer for *linear* least squares, where the answer is exact """ fitter = fitter() a = 2 b = 100 with NumpyRNGContext(_RANDOM_SEED): x = np.linspace(0, 1, 100) # y scatter is amplitude ~1 to make sure covarience is # non-negligible y = x * a + b + np.random.randn(len(x)) # first compute the ordinary least squares covariance matrix X = np.vstack([x, np.ones(len(x))]).T beta = np.matmul(np.matmul(np.linalg.inv(np.matmul(X.T, X)), X.T), y.T) s2 = np.sum((y - np.matmul(X, beta).ravel()) ** 2) / (len(y) - len(beta)) olscov = np.linalg.inv(np.matmul(X.T, X)) * s2 # now do the non-linear least squares fit mod = models.Linear1D(a, b) with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"): fmod = fitter(mod, x, y) assert_allclose(fmod.parameters, beta.ravel()) assert_allclose(olscov, fitter.fit_info["param_cov"]) class TestEntryPoint: """Tests population of fitting with entry point fitters""" def successfulimport(self): # This should work class goodclass(Fitter): __name__ = "GoodClass" return goodclass def raiseimporterror(self): # This should fail as it raises an Import Error raise ImportError def returnbadfunc(self): def badfunc(): # This should import but it should fail type check pass return badfunc def 
returnbadclass(self): # This should import But it should fail subclass type check class badclass: pass return badclass def test_working(self): """This should work fine""" mock_entry_working = mock.create_autospec(EntryPoint) mock_entry_working.name = "Working" mock_entry_working.load = self.successfulimport populate_entry_points([mock_entry_working]) def test_import_error(self): """This raises an import error on load to test that it is handled correctly""" mock_entry_importerror = mock.create_autospec(EntryPoint) mock_entry_importerror.name = "IErr" mock_entry_importerror.load = self.raiseimporterror with pytest.warns(AstropyUserWarning, match=r".*ImportError.*"): populate_entry_points([mock_entry_importerror]) def test_bad_func(self): """This returns a function which fails the type check""" mock_entry_badfunc = mock.create_autospec(EntryPoint) mock_entry_badfunc.name = "BadFunc" mock_entry_badfunc.load = self.returnbadfunc with pytest.warns(AstropyUserWarning, match=r".*Class.*"): populate_entry_points([mock_entry_badfunc]) def test_bad_class(self): """This returns a class which doesn't inherient from fitter""" mock_entry_badclass = mock.create_autospec(EntryPoint) mock_entry_badclass.name = "BadClass" mock_entry_badclass.load = self.returnbadclass with pytest.warns(AstropyUserWarning, match=r".*BadClass.*"): populate_entry_points([mock_entry_badclass]) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") class Test1DFittingWithOutlierRemoval: def setup_class(self): self.x = np.linspace(-5.0, 5.0, 200) self.model_params = (3.0, 1.3, 0.8) def func(p, x): return p[0] * np.exp(-0.5 * (x - p[1]) ** 2 / p[2] ** 2) self.y = func(self.model_params, self.x) @pytest.mark.filterwarnings("ignore:The fit may be unsuccessful") @pytest.mark.filterwarnings( r"ignore:Values in x were outside bounds during a minimize step, " r"clipping to bounds" ) @pytest.mark.parametrize("fitter", non_linear_fitters + fitters) def test_with_fitters_and_sigma_clip(self, fitter): import scipy.stats as stats fitter = fitter() np.random.seed(0) c = stats.bernoulli.rvs(0.25, size=self.x.shape) y = self.y + ( np.random.normal(0.0, 0.2, self.x.shape) + c * np.random.normal(3.0, 5.0, self.x.shape) ) g_init = models.Gaussian1D(amplitude=1.0, mean=0, stddev=1.0) fit = FittingWithOutlierRemoval(fitter, sigma_clip, niter=3, sigma=3.0) fitted_model, _ = fit(g_init, self.x, y) assert_allclose(fitted_model.parameters, self.model_params, rtol=1e-1) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") class Test2DFittingWithOutlierRemoval: def setup_class(self): self.y, self.x = np.mgrid[-3:3:128j, -3:3:128j] self.model_params = (3.0, 1.0, 0.0, 0.8, 0.8) def Gaussian_2D(p, pos): return p[0] * np.exp( -0.5 * (pos[0] - p[2]) ** 2 / p[4] ** 2 - 0.5 * (pos[1] - p[1]) ** 2 / p[3] ** 2 ) self.z = Gaussian_2D(self.model_params, np.array([self.y, self.x])) def initial_guess(self, data, pos): y = pos[0] x = pos[1] """computes the centroid of the data as the initial guess for the center position""" wx = x * data wy = y * data total_intensity = np.sum(data) x_mean = np.sum(wx) / total_intensity y_mean = np.sum(wy) / total_intensity x_to_pixel = x[0].size / (x[x[0].size - 1][x[0].size - 1] - x[0][0]) y_to_pixel = y[0].size / (y[y[0].size - 1][y[0].size - 1] - y[0][0]) x_pos = np.around(x_mean * x_to_pixel + x[0].size / 2.0).astype(int) y_pos = np.around(y_mean * y_to_pixel + y[0].size / 2.0).astype(int) amplitude = data[y_pos][x_pos] return amplitude, x_mean, y_mean @pytest.mark.filterwarnings("ignore:The fit may be unsuccessful") 
@pytest.mark.filterwarnings( r"ignore:Values in x were outside bounds during a minimize step, " r"clipping to bounds" ) @pytest.mark.parametrize("fitter", non_linear_fitters + fitters) def test_with_fitters_and_sigma_clip(self, fitter): import scipy.stats as stats fitter = fitter() np.random.seed(0) c = stats.bernoulli.rvs(0.25, size=self.z.shape) z = self.z + ( np.random.normal(0.0, 0.2, self.z.shape) + c * np.random.normal(self.z, 2.0, self.z.shape) ) guess = self.initial_guess(self.z, np.array([self.y, self.x])) g2_init = models.Gaussian2D( amplitude=guess[0], x_mean=guess[1], y_mean=guess[2], x_stddev=0.75, y_stddev=1.25, ) fit = FittingWithOutlierRemoval(fitter, sigma_clip, niter=3, sigma=3.0) fitted_model, _ = fit(g2_init, self.x, self.y, z) assert_allclose(fitted_model.parameters[0:5], self.model_params, atol=1e-1) def test_1d_set_fitting_with_outlier_removal(): """Test model set fitting with outlier removal (issue #6819)""" poly_set = models.Polynomial1D(2, n_models=2) fitter = FittingWithOutlierRemoval( LinearLSQFitter(), sigma_clip, sigma=2.5, niter=3, cenfunc=np.ma.mean, stdfunc=np.ma.std, ) x = np.arange(10) y = np.array([2.5 * x - 4, 2 * x * x + x + 10]) y[1, 5] = -1000 # outlier poly_set, filt_y = fitter(poly_set, x, y) assert_allclose(poly_set.c0, [-4.0, 10.0], atol=1e-14) assert_allclose(poly_set.c1, [2.5, 1.0], atol=1e-14) assert_allclose(poly_set.c2, [0.0, 2.0], atol=1e-14) def test_2d_set_axis_2_fitting_with_outlier_removal(): """Test fitting 2D model set (axis 2) with outlier removal (issue #6819)""" poly_set = models.Polynomial2D(1, n_models=2, model_set_axis=2) fitter = FittingWithOutlierRemoval( LinearLSQFitter(), sigma_clip, sigma=2.5, niter=3, cenfunc=np.ma.mean, stdfunc=np.ma.std, ) y, x = np.mgrid[0:5, 0:5] z = np.rollaxis(np.array([x + y, 1 - 0.1 * x + 0.2 * y]), 0, 3) z[3, 3:5, 0] = 100.0 # outliers poly_set, filt_z = fitter(poly_set, x, y, z) assert_allclose(poly_set.c0_0, [[[0.0, 1.0]]], atol=1e-14) assert_allclose(poly_set.c1_0, [[[1.0, -0.1]]], atol=1e-14) assert_allclose(poly_set.c0_1, [[[1.0, 0.2]]], atol=1e-14) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") class TestWeightedFittingWithOutlierRemoval: """Issue #7020""" def setup_class(self): # values of x,y not important as we fit y(x,y) = p0 model here self.y, self.x = np.mgrid[0:20, 0:20] self.z = np.mod(self.x + self.y, 2) * 2 - 1 # -1,1 chessboard self.weights = np.mod(self.x + self.y, 2) * 2 + 1 # 1,3 chessboard self.z[0, 0] = 1000.0 # outlier self.z[0, 1] = 1000.0 # outlier self.x1d = self.x.flatten() self.z1d = self.z.flatten() self.weights1d = self.weights.flatten() def test_1d_without_weights_without_sigma_clip(self): model = models.Polynomial1D(0) fitter = LinearLSQFitter() fit = fitter(model, self.x1d, self.z1d) assert_allclose(fit.parameters[0], self.z1d.mean(), atol=10 ** (-2)) def test_1d_without_weights_with_sigma_clip(self): model = models.Polynomial1D(0) fitter = FittingWithOutlierRemoval( LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0 ) fit, mask = fitter(model, self.x1d, self.z1d) assert (~mask).sum() == self.z1d.size - 2 assert mask[0] and mask[1] assert_allclose( fit.parameters[0], 0.0, atol=10 ** (-2) ) # with removed outliers mean is 0.0 def test_1d_with_weights_without_sigma_clip(self): model = models.Polynomial1D(0) fitter = LinearLSQFitter() fit = fitter(model, self.x1d, self.z1d, weights=self.weights1d) assert fit.parameters[0] > 1.0 # outliers pulled it high def test_1d_with_weights_with_sigma_clip(self): """ smoke test for #7020 - fails without fitting.py 
patch because weights does not propagate """ model = models.Polynomial1D(0) fitter = FittingWithOutlierRemoval( LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0 ) fit, filtered = fitter(model, self.x1d, self.z1d, weights=self.weights1d) assert fit.parameters[0] > 10 ** (-2) # weights pulled it > 0 # outliers didn't pull it out of [-1:1] because they had been removed assert fit.parameters[0] < 1.0 def test_1d_set_with_common_weights_with_sigma_clip(self): """added for #6819 (1D model set with weights in common)""" model = models.Polynomial1D(0, n_models=2) fitter = FittingWithOutlierRemoval( LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0 ) z1d = np.array([self.z1d, self.z1d]) fit, filtered = fitter(model, self.x1d, z1d, weights=self.weights1d) assert_allclose(fit.parameters, [0.8, 0.8], atol=1e-14) def test_1d_set_with_weights_with_sigma_clip(self): """1D model set with separate weights""" model = models.Polynomial1D(0, n_models=2) fitter = FittingWithOutlierRemoval( LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0 ) z1d = np.array([self.z1d, self.z1d]) weights = np.array([self.weights1d, self.weights1d]) fit, filtered = fitter(model, self.x1d, z1d, weights=weights) assert_allclose(fit.parameters, [0.8, 0.8], atol=1e-14) def test_2d_without_weights_without_sigma_clip(self): model = models.Polynomial2D(0) fitter = LinearLSQFitter() fit = fitter(model, self.x, self.y, self.z) assert_allclose(fit.parameters[0], self.z.mean(), atol=10 ** (-2)) def test_2d_without_weights_with_sigma_clip(self): model = models.Polynomial2D(0) fitter = FittingWithOutlierRemoval( LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0 ) fit, mask = fitter(model, self.x, self.y, self.z) assert (~mask).sum() == self.z.size - 2 assert mask[0, 0] and mask[0, 1] assert_allclose(fit.parameters[0], 0.0, atol=10 ** (-2)) @pytest.mark.parametrize("fitter", non_linear_fitters) def test_2d_with_weights_without_sigma_clip(self, fitter): fitter = fitter() model = models.Polynomial2D(0) with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"): fit = fitter(model, self.x, self.y, self.z, weights=self.weights) assert fit.parameters[0] > 1.0 # outliers pulled it high def test_2d_linear_with_weights_without_sigma_clip(self): model = models.Polynomial2D(0) # LinearLSQFitter doesn't handle weights properly in 2D fitter = LinearLSQFitter() fit = fitter(model, self.x, self.y, self.z, weights=self.weights) assert fit.parameters[0] > 1.0 # outliers pulled it high @pytest.mark.parametrize("base_fitter", non_linear_fitters) def test_2d_with_weights_with_sigma_clip(self, base_fitter): """smoke test for #7020 - fails without fitting.py patch because weights does not propagate""" base_fitter = base_fitter() model = models.Polynomial2D(0) fitter = FittingWithOutlierRemoval(base_fitter, sigma_clip, niter=3, sigma=3.0) with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"): fit, _ = fitter(model, self.x, self.y, self.z, weights=self.weights) assert fit.parameters[0] > 10 ** (-2) # weights pulled it > 0 # outliers didn't pull it out of [-1:1] because they had been removed assert fit.parameters[0] < 1.0 def test_2d_linear_with_weights_with_sigma_clip(self): """same as test above with a linear fitter.""" model = models.Polynomial2D(0) fitter = FittingWithOutlierRemoval( LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0 ) fit, _ = fitter(model, self.x, self.y, self.z, weights=self.weights) assert fit.parameters[0] > 10 ** (-2) # weights pulled it > 0 # outliers didn't pull it out of [-1:1] because they had 
been removed assert fit.parameters[0] < 1.0 @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.parametrize("fitter", non_linear_fitters) def test_fitters_with_weights(fitter): """Issue #5737""" fitter = fitter() if isinstance(fitter, _NLLSQFitter): pytest.xfail( "This test is poorly designed and causes issues for " "scipy.optimize.least_squares based fitters" ) Xin, Yin = np.mgrid[0:21, 0:21] with NumpyRNGContext(_RANDOM_SEED): zsig = np.random.normal(0, 0.01, size=Xin.shape) # Non-linear model g2 = models.Gaussian2D(10, 10, 9, 2, 3) z = g2(Xin, Yin) gmod = fitter(models.Gaussian2D(15, 7, 8, 1.3, 1.2), Xin, Yin, z + zsig) assert_allclose(gmod.parameters, g2.parameters, atol=10 ** (-2)) # Linear model p2 = models.Polynomial2D(3) p2.parameters = np.arange(10) / 1.2 z = p2(Xin, Yin) with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"): pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig) assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2)) def test_linear_fitter_with_weights(): """Regression test for #7035""" Xin, Yin = np.mgrid[0:21, 0:21] fitter = LinearLSQFitter() with NumpyRNGContext(_RANDOM_SEED): zsig = np.random.normal(0, 0.01, size=Xin.shape) p2 = models.Polynomial2D(3) p2.parameters = np.arange(10) / 1.2 z = p2(Xin, Yin) pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig, weights=zsig ** (-2)) assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2)) @pytest.mark.parametrize( "fixed, warns", [ ({}, True), # tests fitting non-fixed parameters models produces warnings ( {"c1_0": True}, True, ), # tests fitting fixed par models produces warnings - #14037 ( {"c0_1": True}, False, ), # https://github.com/astropy/astropy/pull/14037#pullrequestreview-1191726872 ], ) def test_polynomial_poorly_conditioned(fixed, warns): p0 = models.Polynomial2D(degree=1, c0_0=3, c1_0=5, c0_1=0, fixed=fixed) fitter = LinearLSQFitter() x = [1, 2, 3, 4, 5] y = [1, 1, 1, 1, 1] values = p0(x, y) if warns: with pytest.warns( AstropyUserWarning, match="The fit may be poorly conditioned" ): p = fitter(p0, x, y, values) else: p = fitter(p0, x, y, values) assert np.allclose(p0.parameters, p.parameters, rtol=0, atol=1e-14) def test_linear_fitter_with_weights_flat(): """Same as the above #7035 test but with flattened inputs""" Xin, Yin = np.mgrid[0:21, 0:21] Xin, Yin = Xin.flatten(), Yin.flatten() fitter = LinearLSQFitter() with NumpyRNGContext(_RANDOM_SEED): zsig = np.random.normal(0, 0.01, size=Xin.shape) p2 = models.Polynomial2D(3) p2.parameters = np.arange(10) / 1.2 z = p2(Xin, Yin) pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig, weights=zsig ** (-2)) assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2)) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.filterwarnings("ignore:The fit may be unsuccessful") @pytest.mark.parametrize("fitter", non_linear_fitters + fitters) def test_fitters_interface(fitter): """ Test that ``**kwargs`` work with all optimizers. This is a basic smoke test. 
""" fitter = fitter() model = models.Gaussian1D(10, 4, 0.3) x = np.arange(21) y = model(x) if isinstance(fitter, SimplexLSQFitter): kwargs = {"maxiter": 79, "verblevel": 1, "acc": 1e-6} else: kwargs = {"maxiter": 77, "verblevel": 1, "epsilon": 1e-2, "acc": 1e-6} if isinstance(fitter, LevMarLSQFitter) or isinstance(fitter, _NLLSQFitter): kwargs.pop("verblevel") _ = fitter(model, x, y, **kwargs) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.parametrize("fitter_class", [SLSQPLSQFitter, SimplexLSQFitter]) def test_optimizers(fitter_class): fitter = fitter_class() # Test maxiter assert fitter._opt_method.maxiter == 100 fitter._opt_method.maxiter = 1000 assert fitter._opt_method.maxiter == 1000 # Test eps assert fitter._opt_method.eps == np.sqrt(np.finfo(float).eps) fitter._opt_method.eps = 1e-16 assert fitter._opt_method.eps == 1e-16 # Test acc assert fitter._opt_method.acc == 1e-7 fitter._opt_method.acc = 1e-16 assert fitter._opt_method.acc == 1e-16 # Test repr assert repr(fitter._opt_method) == f"{fitter._opt_method.__class__.__name__}()" fitparams = mk.MagicMock() final_func_val = mk.MagicMock() numiter = mk.MagicMock() funcalls = mk.MagicMock() exit_mode = 1 mess = mk.MagicMock() xtol = mk.MagicMock() if fitter_class == SLSQPLSQFitter: return_value = (fitparams, final_func_val, numiter, exit_mode, mess) fit_info = { "final_func_val": final_func_val, "numiter": numiter, "exit_mode": exit_mode, "message": mess, } else: return_value = (fitparams, final_func_val, numiter, funcalls, exit_mode) fit_info = { "final_func_val": final_func_val, "numiter": numiter, "exit_mode": exit_mode, "num_function_calls": funcalls, } with mk.patch.object( fitter._opt_method.__class__, "opt_method", return_value=return_value ): with pytest.warns(AstropyUserWarning, match=r"The fit may be unsuccessful; .*"): assert (fitparams, fit_info) == fitter._opt_method( mk.MagicMock(), mk.MagicMock(), mk.MagicMock(), xtol=xtol ) assert fit_info == fitter._opt_method.fit_info if isinstance(fitter, SLSQPLSQFitter): fitter._opt_method.acc == 1e-16 else: fitter._opt_method.acc == xtol @mk.patch.multiple(Optimization, __abstractmethods__=set()) def test_Optimization_abstract_call(): optimization = Optimization(mk.MagicMock()) MESSAGE = r"Subclasses should implement this method" with pytest.raises(NotImplementedError, match=MESSAGE): optimization() def test_fitting_with_outlier_removal_niter(): """ Test that FittingWithOutlierRemoval stops prior to reaching niter if the set of masked points has converged and correctly reports the actual number of iterations performed. 
""" # 2 rows with some noise around a constant level and 1 deviant point: x = np.arange(25) with NumpyRNGContext(_RANDOM_SEED): y = np.random.normal(loc=10.0, scale=1.0, size=(2, 25)) y[0, 14] = 100.0 # Fit 2 models with up to 5 iterations (should only take 2): fitter = FittingWithOutlierRemoval( fitter=LinearLSQFitter(), outlier_func=sigma_clip, niter=5, sigma_lower=3.0, sigma_upper=3.0, maxiters=1, ) model, mask = fitter(models.Chebyshev1D(2, n_models=2), x, y) # Confirm that only the deviant point was rejected, in 2 iterations: assert_equal(np.where(mask), [[0], [14]]) assert fitter.fit_info["niter"] == 2 # Refit just the first row without any rejection iterations, to ensure # there are no regressions for that special case: fitter = FittingWithOutlierRemoval( fitter=LinearLSQFitter(), outlier_func=sigma_clip, niter=0, sigma_lower=3.0, sigma_upper=3.0, maxiters=1, ) model, mask = fitter(models.Chebyshev1D(2), x, y[0]) # Confirm that there were no iterations or rejected points: assert mask.sum() == 0 assert fitter.fit_info["niter"] == 0 @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") class TestFittingUncertanties: """ Test that parameter covariance is calculated correctly for the fitters that do so (currently LevMarLSQFitter, LinearLSQFitter). """ example_1D_models = [models.Polynomial1D(2), models.Linear1D()] example_1D_sets = [ models.Polynomial1D(2, n_models=2, model_set_axis=False), models.Linear1D(n_models=2, slope=[1.0, 1.0], intercept=[0, 0]), ] def setup_class(self): np.random.seed(619) self.x = np.arange(10) self.x_grid = np.random.randint(0, 100, size=100).reshape(10, 10) self.y_grid = np.random.randint(0, 100, size=100).reshape(10, 10) self.rand_grid = np.random.random(100).reshape(10, 10) self.rand = self.rand_grid[0] @pytest.mark.parametrize( ("single_model", "model_set"), list(zip(example_1D_models, example_1D_sets)) ) @pytest.mark.parametrize("fitter", non_linear_fitters) def test_1d_models(self, single_model, model_set, fitter): """Test that fitting uncertainties are computed correctly for 1D models and 1D model sets. Use covariance/stds given by LevMarLSQFitter as a benchmark since they are returned by the numpy fitter. 
""" fitter = fitter(calc_uncertainties=True) linlsq_fitter = LinearLSQFitter(calc_uncertainties=True) # test 1D single models # fit single model w/ nonlinear fitter y = single_model(self.x) + self.rand with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"): fit_model = fitter(single_model, self.x, y) cov_model = fit_model.cov_matrix.cov_matrix # fit single model w/ linlsq fitter fit_model_linlsq = linlsq_fitter(single_model, self.x, y) cov_model_linlsq = fit_model_linlsq.cov_matrix.cov_matrix # check covariance, stds computed correctly computed assert_allclose(cov_model_linlsq, cov_model) assert_allclose(np.sqrt(np.diag(cov_model_linlsq)), fit_model_linlsq.stds.stds) # now test 1D model sets # fit set of models w/ linear fitter y = model_set(self.x, model_set_axis=False) + np.array([self.rand, self.rand]) fit_1d_set_linlsq = linlsq_fitter(model_set, self.x, y) cov_1d_set_linlsq = [j.cov_matrix for j in fit_1d_set_linlsq.cov_matrix] # make sure cov matrix from single model fit w/ levmar fitter matches # the cov matrix of first model in the set assert_allclose(cov_1d_set_linlsq[0], cov_model) assert_allclose( np.sqrt(np.diag(cov_1d_set_linlsq[0])), fit_1d_set_linlsq.stds[0].stds ) @pytest.mark.parametrize("fitter", non_linear_fitters) def test_2d_models(self, fitter): """ Test that fitting uncertainties are computed correctly for 2D models and 2D model sets. Use covariance/stds given by LevMarLSQFitter as a benchmark since they are returned by the numpy fitter. """ fitter = fitter(calc_uncertainties=True) linlsq_fitter = LinearLSQFitter(calc_uncertainties=True) single_model = models.Polynomial2D(2, c0_0=2) model_set = models.Polynomial2D( degree=2, n_models=2, c0_0=[2, 3], model_set_axis=False ) # fit single model w/ nonlinear fitter z_grid = single_model(self.x_grid, self.y_grid) + self.rand_grid with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"): fit_model = fitter(single_model, self.x_grid, self.y_grid, z_grid) cov_model = fit_model.cov_matrix.cov_matrix # fit single model w/ nonlinear fitter fit_model_linlsq = linlsq_fitter(single_model, self.x_grid, self.y_grid, z_grid) cov_model_linlsq = fit_model_linlsq.cov_matrix.cov_matrix assert_allclose(cov_model, cov_model_linlsq) assert_allclose(np.sqrt(np.diag(cov_model_linlsq)), fit_model_linlsq.stds.stds) # fit 2d model set z_grid = model_set(self.x_grid, self.y_grid) + np.array( (self.rand_grid, self.rand_grid) ) fit_2d_set_linlsq = linlsq_fitter(model_set, self.x_grid, self.y_grid, z_grid) cov_2d_set_linlsq = [j.cov_matrix for j in fit_2d_set_linlsq.cov_matrix] # make sure cov matrix from single model fit w/ levmar fitter matches # the cov matrix of first model in the set assert_allclose(cov_2d_set_linlsq[0], cov_model) assert_allclose( np.sqrt(np.diag(cov_2d_set_linlsq[0])), fit_2d_set_linlsq.stds[0].stds ) def test_covariance_std_printing_indexing(self, capsys): """ Test printing methods and indexing. 
""" # test str representation for Covariance/stds fitter = LinearLSQFitter(calc_uncertainties=True) mod = models.Linear1D() fit_mod = fitter(mod, self.x, mod(self.x) + self.rand) print(fit_mod.cov_matrix) captured = capsys.readouterr() assert "slope | 0.001" in captured.out assert "intercept| -0.005, 0.03" in captured.out print(fit_mod.stds) captured = capsys.readouterr() assert "slope | 0.032" in captured.out assert "intercept| 0.173" in captured.out # test 'pprint' for Covariance/stds print(fit_mod.cov_matrix.pprint(round_val=5, max_lines=1)) captured = capsys.readouterr() assert "slope | 0.00105" in captured.out assert "intercept" not in captured.out print(fit_mod.stds.pprint(max_lines=1, round_val=5)) captured = capsys.readouterr() assert "slope | 0.03241" in captured.out assert "intercept" not in captured.out # test indexing for Covariance class. assert fit_mod.cov_matrix[0, 0] == fit_mod.cov_matrix["slope", "slope"] # test indexing for stds class. assert fit_mod.stds[1] == fit_mod.stds["intercept"] @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.parametrize("fitter", non_linear_fitters) def test_non_finite_error(fitter): """Regression test error introduced to solve issues #3575 and #12809""" x = np.array([1, 2, 3, 4, 5, np.nan, 7, np.inf]) y = np.array([9, np.nan, 11, np.nan, 13, np.nan, 15, 16]) m_init = models.Gaussian1D() fit = fitter() # Raise warning, notice fit fails due to nans with pytest.raises( NonFiniteValueError, match=r"Objective function has encountered.*" ): fit(m_init, x, y) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.parametrize("fitter", non_linear_fitters) def test_non_finite_filter_1D(fitter): """Regression test filter introduced to remove non-finte values from data""" x = np.array([1, 2, 3, 4, 5, 6, 7, 8]) y = np.array([9, np.nan, 11, np.nan, 13, np.nan, 15, np.inf]) m_init = models.Gaussian1D() fit = fitter() with pytest.warns( AstropyUserWarning, match=r"Non-Finite input data has been removed by the fitter", ): fit(m_init, x, y, filter_non_finite=True) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.parametrize("fitter", non_linear_fitters) def test_non_finite_filter_2D(fitter): """Regression test filter introduced to remove non-finte values from data""" x, y = np.mgrid[0:10, 0:10] m_true = models.Gaussian2D(amplitude=1, x_mean=5, y_mean=5, x_stddev=2, y_stddev=2) with NumpyRNGContext(_RANDOM_SEED): z = m_true(x, y) + np.random.rand(*x.shape) z[0, 0] = np.nan z[3, 3] = np.inf z[7, 5] = -np.inf m_init = models.Gaussian2D() fit = fitter() with pytest.warns( AstropyUserWarning, match=r"Non-Finite input data has been removed by the fitter", ): fit(m_init, x, y, z, filter_non_finite=True) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.filterwarnings(r"ignore:Model is linear in parameters*") @pytest.mark.parametrize("fitter", non_linear_fitters) def test_non_linear_fit_zero_degree_polynomial_with_weights(fitter): """ Regression test for issue #13617 Issue: Weighted non-linear weighted fits of O-degree polynomials cause an error to be raised by scipy. Fix: There should be no error raised in this circumstance """ model = models.Polynomial1D(0, c0=0) fitter = fitter() x = np.arange(10, dtype=float) y = np.ones((10,)) weights = np.ones((10,)) fit = fitter(model, x, y) assert_almost_equal(fit.c0, 1.0) fit = fitter(model, x, y, weights=weights) assert_almost_equal(fit.c0, 1.0)
d16657e731c5be86982601be09cdea2aeb1003c1eedf9047bd3129c581d3cbb1
# Licensed under a 3-clause BSD style license - see LICENSE.rst # pylint: disable=invalid-name import numpy as np import pytest from numpy.testing import assert_allclose, assert_array_equal, assert_array_less from astropy import units as u from astropy.coordinates import Angle from astropy.modeling import InputParameterError, fitting, models from astropy.utils.compat.optional_deps import HAS_SCIPY from astropy.utils.exceptions import AstropyUserWarning fitters = [ fitting.LevMarLSQFitter, fitting.TRFLSQFitter, fitting.LMLSQFitter, fitting.DogBoxLSQFitter, ] def test_sigma_constant(): """ Test that the GAUSSIAN_SIGMA_TO_FWHM constant matches the gaussian_sigma_to_fwhm constant in astropy.stats. We define it manually in astropy.modeling to avoid importing from astropy.stats. """ from astropy.modeling.functional_models import GAUSSIAN_SIGMA_TO_FWHM from astropy.stats.funcs import gaussian_sigma_to_fwhm assert gaussian_sigma_to_fwhm == GAUSSIAN_SIGMA_TO_FWHM def test_Trapezoid1D(): """Regression test for https://github.com/astropy/astropy/issues/1721""" model = models.Trapezoid1D(amplitude=4.2, x_0=2.0, width=1.0, slope=3) xx = np.linspace(0, 4, 8) yy = model(xx) yy_ref = [0.0, 1.41428571, 3.12857143, 4.2, 4.2, 3.12857143, 1.41428571, 0.0] assert_allclose(yy, yy_ref, rtol=0, atol=1e-6) def test_Gaussian1D(): model = models.Gaussian1D(4.2, 1.7, stddev=5.1) x = np.mgrid[0:5] g = model(x) g_ref = [3.97302977, 4.16062403, 4.19273985, 4.06574509, 3.79389376] assert_allclose(g, g_ref, rtol=0, atol=1e-6) assert_allclose(model.fwhm, 12.009582229657841) def test_Gaussian2D(): """ Test rotated elliptical Gaussian2D model. https://github.com/astropy/astropy/pull/2038 """ model = models.Gaussian2D( 4.2, 1.7, 3.1, x_stddev=5.1, y_stddev=3.3, theta=np.pi / 6.0 ) y, x = np.mgrid[0:5, 0:5] g = model(x, y) g_ref = [ [3.01907812, 2.99051889, 2.81271552, 2.5119566, 2.13012709], [3.55982239, 3.6086023, 3.4734158, 3.17454575, 2.75494838], [3.88059142, 4.0257528, 3.96554926, 3.70908389, 3.29410187], [3.91095768, 4.15212857, 4.18567526, 4.00652015, 3.64146544], [3.6440466, 3.95922417, 4.08454159, 4.00113878, 3.72161094], ] assert_allclose(g, g_ref, rtol=0, atol=1e-6) assert_allclose( [model.x_fwhm, model.y_fwhm], [12.009582229657841, 7.7709061486021325] ) def test_Gaussian2DCovariance(): """ Test rotated elliptical Gaussian2D model when cov_matrix is input. 
https://github.com/astropy/astropy/pull/2199 """ cov_matrix = [[49.0, -16.0], [-16.0, 9.0]] model = models.Gaussian2D(17.0, 2.0, 2.5, cov_matrix=cov_matrix) y, x = np.mgrid[0:5, 0:5] g = model(x, y) g_ref = [ [4.3744505, 5.8413977, 7.42988694, 9.00160175, 10.38794269], [8.83290201, 10.81772851, 12.61946384, 14.02225593, 14.84113227], [13.68528889, 15.37184621, 16.44637743, 16.76048705, 16.26953638], [16.26953638, 16.76048705, 16.44637743, 15.37184621, 13.68528889], [14.84113227, 14.02225593, 12.61946384, 10.81772851, 8.83290201], ] assert_allclose(g, g_ref, rtol=0, atol=1e-6) # Test bad cov_matrix shape cov_matrix = [[49.0, 3.14, -16.0], [3.14, -16.0, 9.0], [-16, 27, 3.14]] MESSAGE = r"Covariance matrix must be 2x2" with pytest.raises(ValueError, match=MESSAGE): models.Gaussian2D(17.0, 2.0, 2.5, cov_matrix=cov_matrix) def test_Gaussian2DRotation(): amplitude = 42 x_mean, y_mean = 0, 0 x_stddev, y_stddev = 2, 3 theta = Angle(10, "deg") pars = dict( amplitude=amplitude, x_mean=x_mean, y_mean=y_mean, x_stddev=x_stddev, y_stddev=y_stddev, ) rotation = models.Rotation2D(angle=theta.degree) point1 = (x_mean + 2 * x_stddev, y_mean + 2 * y_stddev) point2 = rotation(*point1) g1 = models.Gaussian2D(theta=0, **pars) g2 = models.Gaussian2D(theta=theta.radian, **pars) value1 = g1(*point1) value2 = g2(*point2) assert_allclose(value1, value2) def test_Gaussian2D_invalid_inputs(): x_stddev = 5.1 y_stddev = 3.3 theta = 10 cov_matrix = [[49.0, -16.0], [-16.0, 9.0]] # first make sure the valid ones are OK models.Gaussian2D() models.Gaussian2D(x_stddev=x_stddev, y_stddev=y_stddev, theta=theta) models.Gaussian2D(x_stddev=None, y_stddev=y_stddev, theta=theta) models.Gaussian2D(x_stddev=x_stddev, y_stddev=None, theta=theta) models.Gaussian2D(x_stddev=x_stddev, y_stddev=y_stddev, theta=None) models.Gaussian2D(cov_matrix=cov_matrix) MESSAGE = r"Cannot specify both cov_matrix and x/y_stddev/theta" with pytest.raises(InputParameterError, match=MESSAGE): models.Gaussian2D(x_stddev=0, cov_matrix=cov_matrix) with pytest.raises(InputParameterError, match=MESSAGE): models.Gaussian2D(y_stddev=0, cov_matrix=cov_matrix) with pytest.raises(InputParameterError, match=MESSAGE): models.Gaussian2D(theta=0, cov_matrix=cov_matrix) def test_Gaussian2D_theta(): theta = Angle(90, "deg") model1 = models.Gaussian2D(1, 25, 25, 15, 5, theta=theta) theta2 = np.pi / 2.0 model2 = models.Gaussian2D(1, 25, 25, 15, 5, theta=theta2) assert model1.theta.quantity.to("radian").value == model2.theta.value assert model1.bounding_box == model2.bounding_box assert model1(619.42, 31.314) == model2(619.42, 31.314) @pytest.mark.parametrize("gamma", (10, -10)) def test_moffat_fwhm(gamma): ans = 34.641016151377542 kwargs = {"gamma": gamma, "alpha": 0.5} m1 = models.Moffat1D(**kwargs) m2 = models.Moffat2D(**kwargs) assert_allclose([m1.fwhm, m2.fwhm], ans) assert_array_less(0, [m1.fwhm, m2.fwhm]) def test_RedshiftScaleFactor(): """Like ``test_ScaleModel()``.""" # Scale by a scalar m = models.RedshiftScaleFactor(0.4) assert m(0) == 0 assert_array_equal(m([1, 2]), [1.4, 2.8]) assert_allclose(m.inverse(m([1, 2])), [1, 2]) # Scale by a list m = models.RedshiftScaleFactor([-0.5, 0, 0.5], n_models=3) assert_array_equal(m(0), 0) assert_array_equal(m([1, 2], model_set_axis=False), [[0.5, 1], [1, 2], [1.5, 3]]) assert_allclose( m.inverse(m([1, 2], model_set_axis=False)), [[1, 2], [1, 2], [1, 2]] ) def test_RedshiftScaleFactor_inverse(): m = models.RedshiftScaleFactor(1.2345) assert_allclose(m.inverse(m(6.789)), 6.789) def 
test_RedshiftScaleFactor_inverse_bounding_box(): model = models.RedshiftScaleFactor(2) model.bounding_box = (1, 5) assert model.bounding_box == (1, 5) inverse_model = model.inverse assert inverse_model.bounding_box == (3, 15) assert_allclose( inverse_model(model(4, with_bounding_box=True), with_bounding_box=True), 4 ) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_RedshiftScaleFactor_model_levmar_fit(): """Test fitting RedshiftScaleFactor model with LevMarLSQFitter.""" init_model = models.RedshiftScaleFactor() x = np.arange(10) y = 2.7174 * x fitter = fitting.LevMarLSQFitter() fitted_model = fitter(init_model, x, y) assert_allclose(fitted_model.parameters, [1.7174]) def test_Ellipse2D(): """Test Ellipse2D model.""" amplitude = 7.5 x0, y0 = 15, 15 theta = Angle(45, "deg") em = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta.radian) y, x = np.mgrid[0:30, 0:30] e = em(x, y) assert np.all(e[e > 0] == amplitude) assert e[y0, x0] == amplitude rotation = models.Rotation2D(angle=theta.degree) point1 = [2, 0] # Rotation2D center is (0, 0) point2 = rotation(*point1) point1 = np.array(point1) + [x0, y0] point2 = np.array(point2) + [x0, y0] e1 = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta=0.0) e2 = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta=theta.radian) assert e1(*point1) == e2(*point2) def test_Ellipse2D_circular(): """Test that circular Ellipse2D agrees with Disk2D [3736].""" amplitude = 7.5 radius = 10 size = (radius * 2) + 1 y, x = np.mgrid[0:size, 0:size] ellipse = models.Ellipse2D(amplitude, radius, radius, radius, radius, theta=0)(x, y) disk = models.Disk2D(amplitude, radius, radius, radius)(x, y) assert np.all(ellipse == disk) def test_Ellipse2D_theta(): theta = Angle(90, "deg") model1 = models.Ellipse2D(1, 25, 25, 15, 5, theta=theta) theta2 = np.pi / 2.0 model2 = models.Ellipse2D(1, 25, 25, 15, 5, theta=theta2) assert model1.theta.quantity.to("radian").value == model2.theta.value assert model1.bounding_box == model2.bounding_box assert model1(619.42, 31.314) == model2(619.42, 31.314) def test_Scale_inverse(): m = models.Scale(1.2345) assert_allclose(m.inverse(m(6.789)), 6.789) def test_Scale_inverse_bounding_box(): model = models.Scale(2) model.bounding_box = (1, 5) assert model.bounding_box == (1, 5) inverse_model = model.inverse assert inverse_model.bounding_box == (2, 10) assert ( inverse_model(model(4, with_bounding_box=True), with_bounding_box=True) == 4.0 ) def test_Multiply_inverse(): m = models.Multiply(1.2345) assert_allclose(m.inverse(m(6.789)), 6.789) def test_Multiply_inverse_bounding_box(): model = models.Multiply(2) model.bounding_box = (1, 5) assert model.bounding_box == (1, 5) inverse_model = model.inverse assert inverse_model.bounding_box == (2, 10) assert ( inverse_model(model(4, with_bounding_box=True), with_bounding_box=True) == 4.0 ) def test_Shift_inverse(): m = models.Shift(1.2345) assert_allclose(m.inverse(m(6.789)), 6.789) def test_Shift_inverse_bounding_box(): model = models.Shift(10) model.bounding_box = (1, 5) assert model.bounding_box == (1, 5) inverse_model = model.inverse assert inverse_model.bounding_box == (11, 15) assert ( inverse_model(model(4, with_bounding_box=True), with_bounding_box=True) == 4.0 ) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.parametrize("fitter", fitters) def test_Shift_model_levmar_fit(fitter): """Test fitting Shift model with LevMarLSQFitter (issue #6103).""" fitter = fitter() init_model = models.Shift() x = np.arange(10) y = x + 0.1 with pytest.warns(AstropyUserWarning, 
match="Model is linear in parameters"): fitted_model = fitter(init_model, x, y) assert_allclose(fitted_model.parameters, [0.1], atol=1e-15) def test_Shift_model_set_linear_fit(): """Test linear fitting of Shift model (issue #6103).""" init_model = models.Shift(offset=[0, 0], n_models=2) x = np.arange(10) yy = np.array([x + 0.1, x - 0.2]) fitter = fitting.LinearLSQFitter() fitted_model = fitter(init_model, x, yy) assert_allclose(fitted_model.parameters, [0.1, -0.2], atol=1e-15) @pytest.mark.parametrize("Model", (models.Scale, models.Multiply)) def test_Scale_model_set_linear_fit(Model): """Test linear fitting of Scale model (#6103).""" init_model = Model(factor=[0, 0], n_models=2) x = np.arange(-3, 7) yy = np.array([1.15 * x, 0.96 * x]) fitter = fitting.LinearLSQFitter() fitted_model = fitter(init_model, x, yy) assert_allclose(fitted_model.parameters, [1.15, 0.96], atol=1e-15) @pytest.mark.parametrize("Model", (models.Scale, models.Multiply)) def test_Scale_model_evaluate_without_units(Model): m = Model(factor=4 * u.m) kwargs = {"x": 3 * u.m, "y": 7 * u.m} mnu = m.without_units_for_data(**kwargs) x = np.linspace(-1, 1, 100) assert_allclose(mnu(x), 4 * x) # https://github.com/astropy/astropy/issues/6178 def test_Ring2D_rout(): # Test with none of r_in, r_out, width specified m = models.Ring2D(amplitude=1, x_0=1, y_0=1) assert m.amplitude.value == 1 assert m.x_0.value == 1 assert m.y_0.value == 1 assert m.r_in.value == 1 assert m.width.value == 1 # Test with r_in specified only m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_in=4) assert m.amplitude.value == 1 assert m.x_0.value == 1 assert m.y_0.value == 1 assert m.r_in.value == 4 assert m.width.value == 1 # Test with r_out specified only m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_out=7) assert m.amplitude.value == 1 assert m.x_0.value == 1 assert m.y_0.value == 1 assert m.r_in.value == 1 assert m.width.value == 6 # Error when r_out is too small for default r_in MESSAGE = r"r_in=.* and width=.* must both be >=0" with pytest.raises(InputParameterError, match=MESSAGE): models.Ring2D(amplitude=1, x_0=1, y_0=1, r_out=0.5) # Test with width specified only m = models.Ring2D(amplitude=1, x_0=1, y_0=1, width=11) assert m.amplitude.value == 1 assert m.x_0.value == 1 assert m.y_0.value == 1 assert m.r_in.value == 1 assert m.width.value == 11 # Test with r_in and r_out specified only m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_in=2, r_out=5) assert m.amplitude.value == 1 assert m.x_0.value == 1 assert m.y_0.value == 1 assert m.r_in.value == 2 assert m.width.value == 3 # Error when r_out is smaller than r_in with pytest.raises(InputParameterError, match=MESSAGE): models.Ring2D(amplitude=1, x_0=1, y_0=1, r_out=1, r_in=4) # Test with r_in and width specified only m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_in=2, width=4) assert m.amplitude.value == 1 assert m.x_0.value == 1 assert m.y_0.value == 1 assert m.r_in.value == 2 assert m.width.value == 4 # Test with r_out and width specified only m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_out=12, width=7) assert m.amplitude.value == 1 assert m.x_0.value == 1 assert m.y_0.value == 1 assert m.r_in.value == 5 assert m.width.value == 7 # Error when width is larger than r_out with pytest.raises(InputParameterError, match=MESSAGE): models.Ring2D(amplitude=1, x_0=1, y_0=1, r_out=1, width=4) # Test with r_in, r_out, and width all specified m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_in=3, r_out=11, width=8) assert m.amplitude.value == 1 assert m.x_0.value == 1 assert m.y_0.value == 1 assert m.r_in.value 
== 3 assert m.width.value == 8 # error when specifying all MESSAGE = r"Width must be r_out - r_in" with pytest.raises(InputParameterError, match=MESSAGE): models.Ring2D(amplitude=1, x_0=1, y_0=1, r_in=3, r_out=11, width=7) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.parametrize("fitter", fitters) def test_Voigt1D(fitter): fitter = fitter() voi = models.Voigt1D(amplitude_L=-0.5, x_0=1.0, fwhm_L=5.0, fwhm_G=5.0) xarr = np.linspace(-5.0, 5.0, num=40) yarr = voi(xarr) voi_init = models.Voigt1D(amplitude_L=-1.0, x_0=1.0, fwhm_L=5.0, fwhm_G=5.0) voi_fit = fitter(voi_init, xarr, yarr) assert_allclose(voi_fit.param_sets, voi.param_sets) # Invalid method MESSAGE = r"Not a valid method for Voigt1D Faddeeva function: test" with pytest.raises(ValueError, match=MESSAGE): models.Voigt1D(method="test") @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.parametrize("algorithm", ("humlicek2", "wofz")) def test_Voigt1D_norm(algorithm): """Test integral of normalized Voigt profile.""" from scipy.integrate import quad voi = models.Voigt1D( amplitude_L=1.0 / np.pi, x_0=0.0, fwhm_L=2.0, fwhm_G=1.5, method=algorithm ) if algorithm == "wofz": atol = 1e-14 else: atol = 1e-8 assert_allclose(quad(voi, -np.inf, np.inf)[0], 1.0, atol=atol) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.parametrize("doppler", (1.0e-3, 1.0e-2, 0.1, 0.5, 1.0, 2.5, 5.0, 10)) def test_Voigt1D_hum2(doppler): """ Verify accuracy of Voigt profile in Humlicek approximation to Faddeeva.cc (SciPy). """ x = np.linspace(-20, 20, 400001) voi_w = models.Voigt1D( amplitude_L=2.0 / np.pi, fwhm_L=1.0, fwhm_G=doppler, method="wofz" ) vf_w = voi_w(x) dvda_w = voi_w.fit_deriv( x, x_0=0, amplitude_L=2.0 / np.pi, fwhm_L=1.0, fwhm_G=doppler ) voi_h = models.Voigt1D( amplitude_L=2.0 / np.pi, fwhm_L=1.0, fwhm_G=doppler, method="humlicek2" ) vf_h = voi_h(x) dvda_h = voi_h.fit_deriv( x, x_0=0, amplitude_L=2.0 / np.pi, fwhm_L=1.0, fwhm_G=doppler ) assert_allclose(vf_h, vf_w, rtol=1e-7 * (2 + 1 / np.sqrt(doppler))) assert_allclose(dvda_h, dvda_w, rtol=1e-9, atol=1e-7 * (1 + 30 / doppler)) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.parametrize("fitter", fitters) def test_KingProjectedAnalytic1D_fit(fitter): fitter = fitter() km = models.KingProjectedAnalytic1D(amplitude=1, r_core=1, r_tide=2) xarr = np.linspace(0.1, 2, 10) yarr = km(xarr) km_init = models.KingProjectedAnalytic1D(amplitude=1, r_core=1, r_tide=1) km_fit = fitter(km_init, xarr, yarr) assert_allclose(km_fit.param_sets, km.param_sets) assert_allclose(km_fit.concentration, 0.30102999566398136) @pytest.mark.parametrize("model", [models.Exponential1D(), models.Logarithmic1D()]) def test_ExponentialAndLogarithmic1D_fit(model): xarr = np.linspace(0.1, 10.0, 200) assert_allclose(xarr, model.inverse(model(xarr))) @pytest.mark.parametrize("model", [models.Exponential1D(), models.Logarithmic1D()]) def test_ExponentialAndLogarithmic_set_tau(model): MESSAGE = r"0 is not an allowed value for tau" with pytest.raises(ValueError, match=MESSAGE): model.tau = 0 def test_Linear1D_inverse(): model = models.Linear1D(slope=4, intercept=-12) inverse = model.inverse assert inverse.slope == 1 / 4 assert inverse.intercept == 3 @pytest.mark.parametrize( "trig", [ (models.Sine1D, [-0.25, 0.25]), (models.ArcSine1D, [-0.25, 0.25]), (models.Cosine1D, [0, 0.5]), (models.ArcCosine1D, [0, 0.5]), (models.Tangent1D, [-0.25, 0.25]), (models.ArcTangent1D, [-0.25, 0.25]), ], ) def test_trig_inverse(trig): mdl = trig[0]() lower, upper = 
trig[1] x = np.arange(lower, upper, 0.01) assert_allclose(mdl.inverse(mdl(x)), x, atol=1e-10) assert_allclose(mdl(mdl.inverse(x)), x, atol=1e-10) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_Sersic2D_theta(): theta = Angle(90, "deg") model1 = models.Sersic2D(1, 5, 4, 25, 25, 0.5, theta=theta) theta2 = np.pi / 2.0 model2 = models.Sersic2D(1, 5, 4, 25, 25, 0.5, theta=theta2) assert model1.theta.quantity.to("radian").value == model2.theta.value assert model1(619.42, 31.314) == model2(619.42, 31.314)
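
# --- Illustrative sketch (editor's addition, not part of the original suite) ---
# The reference value 12.009582229657841 used in ``test_Gaussian1D`` above is
# simply 2*sqrt(2*ln 2) * stddev.  A minimal standalone check of that relation;
# the test name is hypothetical.
def test_gaussian_fwhm_relation_sketch():
    stddev = 5.1
    m = models.Gaussian1D(amplitude=1.0, mean=0.0, stddev=stddev)
    # FWHM of a Gaussian follows directly from its standard deviation
    assert_allclose(m.fwhm, 2.0 * np.sqrt(2.0 * np.log(2.0)) * stddev)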
70b03ad37c4d278fb38cb870ee37a975a927f8b46877e78dfc10fc98ced27191
# Licensed under a 3-clause BSD style license - see LICENSE.rst # pylint: disable=invalid-name import re from inspect import Parameter import numpy as np import pytest from astropy.modeling.utils import ( _SpecialOperatorsDict, _validate_domain_window, get_inputs_and_params, poly_map_domain, ) def test_poly_map_domain(): oldx = np.array([1, 2, 3, 4]) # test shift/scale assert (poly_map_domain(oldx, (-4, 4), (-3, 3)) == [0.75, 1.5, 2.25, 3]).all() # errors MESSAGE = r'Expected "domain" and "window" to be a tuple of size 2' with pytest.raises(ValueError, match=MESSAGE): poly_map_domain(oldx, (-4,), (-3, 3)) with pytest.raises(ValueError, match=MESSAGE): poly_map_domain(oldx, (-4, 4, -4), (-3, 3)) with pytest.raises(ValueError, match=MESSAGE): poly_map_domain(oldx, (-4, 4), (-3,)) with pytest.raises(ValueError, match=MESSAGE): poly_map_domain(oldx, (-4, 4), (-3, 3, -3)) def test__validate_domain_window(): # Test if None assert _validate_domain_window(None) is None # Test normal assert _validate_domain_window((-2, 2)) == (-2, 2) assert _validate_domain_window([-2, 2]) == (-2, 2) assert _validate_domain_window(np.array([-2, 2])) == (-2, 2) # Test error MESSAGE = r"domain and window should be tuples of size 2" with pytest.raises(ValueError, match=MESSAGE): _validate_domain_window((-2, 2, -2)) with pytest.raises(ValueError, match=MESSAGE): _validate_domain_window((-2,)) with pytest.raises(ValueError, match=MESSAGE): _validate_domain_window([-2]) with pytest.raises(ValueError, match=MESSAGE): _validate_domain_window(np.array([-2])) with pytest.raises(ValueError, match=MESSAGE): _validate_domain_window(-2) def test_get_inputs_and_params(): # test normal def func1(input0, input1, param0=5, param1=7): pass inputs, params = get_inputs_and_params(func1) for index, _input in enumerate(inputs): assert isinstance(_input, Parameter) assert _input.name == f"input{index}" assert _input.kind == _input.POSITIONAL_OR_KEYWORD assert _input.default == Parameter.empty default = [5, 7] for index, param in enumerate(params): assert isinstance(param, Parameter) assert param.name == f"param{index}" assert param.kind == param.POSITIONAL_OR_KEYWORD assert param.default == default[index] # Error MESSAGE = re.escape("Signature must not have *args or **kwargs") def func2(input0, input1, *args, param0=5, param1=7): pass def func3(input0, input1, param0=5, param1=7, **kwargs): pass with pytest.raises(ValueError, match=MESSAGE): get_inputs_and_params(func2) with pytest.raises(ValueError, match=MESSAGE): get_inputs_and_params(func3) class Test_SpecialOperatorsDict: def setup_method(self): self.key = "test" self.val = "value" def test__set_value(self): special_operators = _SpecialOperatorsDict() assert self.key not in special_operators special_operators._set_value(self.key, self.val) assert self.key in special_operators assert special_operators[self.key] == self.val with pytest.raises(ValueError, match='Special operator "test" already exists'): special_operators._set_value(self.key, self.val) def test___setitem__(self): special_operators = _SpecialOperatorsDict() assert self.key not in special_operators with pytest.deprecated_call(): special_operators[self.key] = self.val assert self.key in special_operators assert special_operators[self.key] == self.val def test__SpecialOperatorsDict__get_unique_id(self): special_operators = _SpecialOperatorsDict() assert special_operators._unique_id == 0 assert special_operators._get_unique_id() == 1 assert special_operators._unique_id == 1 assert special_operators._get_unique_id() == 2 
assert special_operators._unique_id == 2 assert special_operators._get_unique_id() == 3 assert special_operators._unique_id == 3 def test__SpecialOperatorsDict_add(self): special_operators = _SpecialOperatorsDict() operator_name = "test" operator = "operator" key0 = special_operators.add(operator_name, operator) assert key0 == (operator_name, special_operators._unique_id) assert key0 in special_operators assert special_operators[key0] == operator key1 = special_operators.add(operator_name, operator) assert key1 == (operator_name, special_operators._unique_id) assert key1 in special_operators assert special_operators[key1] == operator assert key0 != key1
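
# --- Illustrative sketch (editor's addition, not part of the original suite) ---
# ``poly_map_domain`` applies the affine map sending ``domain`` onto
# ``window``: new = x * scl + off, with scl = (w1 - w0) / (d1 - d0) and
# off = (w0 * d1 - w1 * d0) / (d1 - d0).  Re-deriving the expected values of
# ``test_poly_map_domain`` above from those formulas; the test name is
# hypothetical.
def test_poly_map_domain_affine_sketch():
    oldx = np.array([1, 2, 3, 4])
    d0, d1 = -4, 4
    w0, w1 = -3, 3
    scl = (w1 - w0) / (d1 - d0)  # 0.75 for these values
    off = (w0 * d1 - w1 * d0) / (d1 - d0)  # 0.0 for these values
    assert np.allclose(poly_map_domain(oldx, (d0, d1), (w0, w1)), oldx * scl + off)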
7a64109eec60eb5872a91b3ca88e4ed98a9bec1ab9f3530a3d18542dcab0f544
# Licensed under a 3-clause BSD style license - see LICENSE.rst:
"""
Tests for model evaluation.
Compare the results of some models with other programs.
"""
import unittest.mock as mk

import numpy as np

# pylint: disable=invalid-name, no-member
import pytest
from numpy.testing import assert_allclose, assert_equal

import astropy.modeling.tabular as tabular_models
from astropy import units as u
from astropy.modeling import fitting, models
from astropy.modeling.bounding_box import ModelBoundingBox
from astropy.modeling.core import FittableModel, Model, _ModelMeta
from astropy.modeling.models import Gaussian2D
from astropy.modeling.parameters import InputParameterError, Parameter
from astropy.modeling.polynomial import PolynomialBase
from astropy.modeling.powerlaws import (
    BrokenPowerLaw1D,
    ExponentialCutoffPowerLaw1D,
    LogParabola1D,
    PowerLaw1D,
    SmoothlyBrokenPowerLaw1D,
)
from astropy.modeling.separable import separability_matrix
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils import NumpyRNGContext, minversion
from astropy.utils.compat.optional_deps import HAS_SCIPY

from .example_models import models_1D, models_2D

fitters = [
    fitting.LevMarLSQFitter,
    fitting.TRFLSQFitter,
    fitting.LMLSQFitter,
    fitting.DogBoxLSQFitter,
]


@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_custom_model(fitter, amplitude=4, frequency=1):
    fitter = fitter()

    def sine_model(x, amplitude=4, frequency=1):
        """
        Model function
        """
        return amplitude * np.sin(2 * np.pi * frequency * x)

    def sine_deriv(x, amplitude=4, frequency=1):
        """
        Jacobian of model function, i.e., derivative of the function with
        respect to the *parameters*
        """
        da = np.sin(2 * np.pi * frequency * x)
        df = 2 * np.pi * x * amplitude * np.cos(2 * np.pi * frequency * x)
        return np.vstack((da, df))

    SineModel = models.custom_model(sine_model, fit_deriv=sine_deriv)

    x = np.linspace(0, 4, 50)
    sin_model = SineModel()

    sin_model.evaluate(x, 5.0, 2.0)
    sin_model.fit_deriv(x, 5.0, 2.0)

    np.random.seed(0)
    data = sin_model(x) + np.random.rand(len(x)) - 0.5
    model = fitter(sin_model, x, data)
    assert np.all(
        (
            np.array([model.amplitude.value, model.frequency.value])
            - np.array([amplitude, frequency])
        )
        < 0.001
    )


def test_custom_model_init():
    @models.custom_model
    def SineModel(x, amplitude=4, frequency=1):
        """Model function"""
        return amplitude * np.sin(2 * np.pi * frequency * x)

    sin_model = SineModel(amplitude=2.0, frequency=0.5)
    assert sin_model.amplitude == 2.0
    assert sin_model.frequency == 0.5


def test_custom_model_defaults():
    @models.custom_model
    def SineModel(x, amplitude=4, frequency=1):
        """Model function"""
        return amplitude * np.sin(2 * np.pi * frequency * x)

    sin_model = SineModel()
    assert SineModel.amplitude.default == 4
    assert SineModel.frequency.default == 1
    assert sin_model.amplitude == 4
    assert sin_model.frequency == 1


def test_inconsistent_input_shapes():
    g = Gaussian2D()
    x = np.arange(-1.0, 1, 0.2)
    y = x.copy()
    # check scalar input broadcasting works
    assert np.abs(g(x, 0) - g(x, 0 * x)).sum() == 0
    # input arrays of different (but compatible) shapes broadcast by numpy rules
    x.shape = (10, 1)
    y.shape = (1, 10)
    result = g(x, y)
    assert result.shape == (10, 10)


def test_custom_model_bounding_box():
    """Test bounding box evaluation for a 3D model"""

    def ellipsoid(x, y, z, x0=13, y0=10, z0=8, a=4, b=3, c=2, amp=1):
        rsq = ((x - x0) / a) ** 2 + ((y - y0) / b) ** 2 + ((z - z0) / c) ** 2
        val = (rsq < 1) * amp
        return val

    class Ellipsoid3D(models.custom_model(ellipsoid)):
        @property
        def bounding_box(self):
            return (
                (self.z0 - self.c, self.z0 + self.c),
                (self.y0 - self.b, self.y0 + self.b),
                (self.x0 - self.a, self.x0 + self.a),
            )

    model = Ellipsoid3D()
    bbox = model.bounding_box

    zlim, ylim, xlim = bbox.bounding_box()
    dz, dy, dx = np.diff(bbox) / 2
    z1, y1, x1 = np.mgrid[
        slice(zlim[0], zlim[1] + 1),
        slice(ylim[0], ylim[1] + 1),
        slice(xlim[0], xlim[1] + 1),
    ]
    z2, y2, x2 = np.mgrid[
        slice(zlim[0] - dz, zlim[1] + dz + 1),
        slice(ylim[0] - dy, ylim[1] + dy + 1),
        slice(xlim[0] - dx, xlim[1] + dx + 1),
    ]

    arr = model(x2, y2, z2, with_bounding_box=True)
    sub_arr = model(x1, y1, z1, with_bounding_box=True)

    # check for flux agreement
    assert abs(np.nansum(arr) - np.nansum(sub_arr)) < np.nansum(arr) * 1e-7


class Fittable2DModelTester:
    """
    Test class for all two dimensional parametric models.

    Test values have to be defined in example_models.py. It currently tests
    the model with different input types, evaluates the model at different
    positions, and assures that it gives the correct values. It also tests
    that the model works with non-linear fitters.

    This can be used as a base class for user defined model testing.
    """

    def setup_class(self):
        self.N = 100
        self.M = 100
        self.eval_error = 0.0001
        self.fit_error = 0.1
        self.x = 5.3
        self.y = 6.7
        self.x1 = np.arange(1, 10, 0.1)
        self.y1 = np.arange(1, 10, 0.1)
        self.y2, self.x2 = np.mgrid[:10, :8]

    def test_input2D(self, model_class, test_parameters):
        """Test model with different input types."""
        model = create_model(model_class, test_parameters)
        model(self.x, self.y)
        model(self.x1, self.y1)
        model(self.x2, self.y2)

    def test_eval2D(self, model_class, test_parameters):
        """Test model values at certain given points"""
        model = create_model(model_class, test_parameters)
        x = test_parameters["x_values"]
        y = test_parameters["y_values"]
        z = test_parameters["z_values"]
        assert np.all(np.abs(model(x, y) - z) < self.eval_error)

    def test_bounding_box2D(self, model_class, test_parameters):
        """Test bounding box evaluation"""
        model = create_model(model_class, test_parameters)

        # testing setter
        model.bounding_box = ((-5, 5), (-5, 5))
        assert model.bounding_box == ((-5, 5), (-5, 5))

        model.bounding_box = None
        MESSAGE = r"No bounding box is defined for this model .*"
        with pytest.raises(NotImplementedError, match=MESSAGE):
            model.bounding_box

        # test the exception if dimensions don't match
        MESSAGE = r"An interval must be some sort of sequence of length 2"
        with pytest.raises(ValueError, match=MESSAGE):
            model.bounding_box = (-5, 5)

        del model.bounding_box

        try:
            bbox = model.bounding_box
        except NotImplementedError:
            return

        ddx = 0.01
        ylim, xlim = bbox
        x1 = np.arange(xlim[0], xlim[1], ddx)
        y1 = np.arange(ylim[0], ylim[1], ddx)

        x2 = np.concatenate(
            (
                [xlim[0] - idx * ddx for idx in range(10, 0, -1)],
                x1,
                [xlim[1] + idx * ddx for idx in range(1, 10)],
            )
        )
        y2 = np.concatenate(
            (
                [ylim[0] - idx * ddx for idx in range(10, 0, -1)],
                y1,
                [ylim[1] + idx * ddx for idx in range(1, 10)],
            )
        )

        inside_bbox = model(x1, y1)
        outside_bbox = model(x2, y2, with_bounding_box=True)
        outside_bbox = outside_bbox[~np.isnan(outside_bbox)]

        assert np.all(inside_bbox == outside_bbox)

    def test_bounding_box2D_peak(self, model_class, test_parameters):
        if not test_parameters.pop("bbox_peak", False):
            return

        model = create_model(model_class, test_parameters)
        bbox = model.bounding_box

        ylim, xlim = bbox
        dy, dx = np.diff(bbox) / 2
        y1, x1 = np.mgrid[slice(ylim[0], ylim[1] + 1), slice(xlim[0], xlim[1] + 1)]
        y2, x2 = np.mgrid[
            slice(ylim[0] - dy, ylim[1] + dy + 1), slice(xlim[0] - dx, xlim[1] + dx + 1)
        ]

        arr = model(x2, y2)
        sub_arr = model(x1, y1)

        # check for flux agreement
        assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * 1e-7

    @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
    @pytest.mark.parametrize("fitter", fitters)
    def test_fitter2D(self, model_class, test_parameters, fitter):
        """Test if the parametric model works with the fitter."""
        fitter = fitter()

        x_lim = test_parameters["x_lim"]
        y_lim = test_parameters["y_lim"]

        parameters = test_parameters["parameters"]
        model = create_model(model_class, test_parameters)

        if isinstance(parameters, dict):
            parameters = [parameters[name] for name in model.param_names]

        if "log_fit" in test_parameters:
            if test_parameters["log_fit"]:
                x = np.logspace(x_lim[0], x_lim[1], self.N)
                y = np.logspace(y_lim[0], y_lim[1], self.N)
        else:
            x = np.linspace(x_lim[0], x_lim[1], self.N)
            y = np.linspace(y_lim[0], y_lim[1], self.N)
        xv, yv = np.meshgrid(x, y)

        np.random.seed(0)
        # add 10% noise to the amplitude
        noise = np.random.rand(self.N, self.N) - 0.5
        data = model(xv, yv) + 0.1 * parameters[0] * noise

        new_model = fitter(model, xv, yv, data)

        params = [getattr(new_model, name) for name in new_model.param_names]
        fixed = [param.fixed for param in params]
        expected = np.array([val for val, fixed in zip(parameters, fixed) if not fixed])
        fitted = np.array([param.value for param in params if not param.fixed])
        assert_allclose(fitted, expected, atol=self.fit_error)

    @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
    @pytest.mark.parametrize("fitter", fitters)
    def test_deriv_2D(self, model_class, test_parameters, fitter):
        """
        Test the derivative of a model by fitting with an estimated and
        analytical derivative.
        """
        fitter = fitter()

        x_lim = test_parameters["x_lim"]
        y_lim = test_parameters["y_lim"]

        if model_class.fit_deriv is None or issubclass(model_class, PolynomialBase):
            return

        if "log_fit" in test_parameters:
            if test_parameters["log_fit"]:
                x = np.logspace(x_lim[0], x_lim[1], self.N)
                y = np.logspace(y_lim[0], y_lim[1], self.M)
                x_test = np.logspace(x_lim[0], x_lim[1], self.N * 10)
                y_test = np.logspace(y_lim[0], y_lim[1], self.M * 10)
        else:
            x = np.linspace(x_lim[0], x_lim[1], self.N)
            y = np.linspace(y_lim[0], y_lim[1], self.M)
            x_test = np.linspace(x_lim[0], x_lim[1], self.N * 10)
            y_test = np.linspace(y_lim[0], y_lim[1], self.M * 10)
        xv, yv = np.meshgrid(x, y)
        xv_test, yv_test = np.meshgrid(x_test, y_test)

        try:
            model_with_deriv = create_model(
                model_class,
                test_parameters,
                use_constraints=False,
                parameter_key="deriv_initial",
            )
            model_no_deriv = create_model(
                model_class,
                test_parameters,
                use_constraints=False,
                parameter_key="deriv_initial",
            )
            model = create_model(
                model_class,
                test_parameters,
                use_constraints=False,
                parameter_key="deriv_initial",
            )
        except KeyError:
            model_with_deriv = create_model(
                model_class, test_parameters, use_constraints=False
            )
            model_no_deriv = create_model(
                model_class, test_parameters, use_constraints=False
            )
            model = create_model(model_class, test_parameters, use_constraints=False)

        # add 10% noise to the amplitude
        rsn = np.random.default_rng(0)
        amplitude = test_parameters["parameters"][0]
        n = 0.1 * amplitude * (rsn.random((self.M, self.N)) - 0.5)

        data = model(xv, yv) + n
        fitter_with_deriv = fitter
        new_model_with_deriv = fitter_with_deriv(model_with_deriv, xv, yv, data)
        fitter_no_deriv = fitter
        new_model_no_deriv = fitter_no_deriv(
            model_no_deriv, xv, yv, data, estimate_jacobian=True
        )
        assert_allclose(
            new_model_with_deriv(xv_test, yv_test),
            new_model_no_deriv(xv_test, yv_test),
            rtol=1e-2,
        )
        if model_class != Gaussian2D:
            assert_allclose(
                new_model_with_deriv.parameters, new_model_no_deriv.parameters, rtol=0.1
            )


class Fittable1DModelTester:
    """
    Test class for all one dimensional parametric models.

    Test values have to be defined in example_models.py. It currently tests
    the model with different input types, evaluates the model at different
    positions, and assures that it gives the correct values. It also tests
    that the model works with non-linear fitters.

    This can be used as a base class for user defined model testing.
    """

    # These models will fail the fitting test because the built-in fitting
    # data will produce non-finite values
    _non_finite_models = [
        BrokenPowerLaw1D,
        ExponentialCutoffPowerLaw1D,
        LogParabola1D,
        PowerLaw1D,
        SmoothlyBrokenPowerLaw1D,
    ]

    def setup_class(self):
        self.N = 100
        self.M = 100
        self.eval_error = 0.0001
        self.fit_error = 0.11
        self.x = 5.3
        self.y = 6.7
        self.x1 = np.arange(1, 10, 0.1)
        self.y1 = np.arange(1, 10, 0.1)
        self.y2, self.x2 = np.mgrid[:10, :8]

    @pytest.mark.filterwarnings(r"ignore:.*:RuntimeWarning")
    def test_input1D(self, model_class, test_parameters):
        """Test model with different input types."""
        model = create_model(model_class, test_parameters)
        model(self.x)
        model(self.x1)
        model(self.x2)

    def test_eval1D(self, model_class, test_parameters):
        """
        Test model values at certain given points
        """
        model = create_model(model_class, test_parameters)
        x = test_parameters["x_values"]
        y = test_parameters["y_values"]
        assert_allclose(model(x), y, atol=self.eval_error)

    def test_bounding_box1D(self, model_class, test_parameters):
        """Test bounding box evaluation"""
        model = create_model(model_class, test_parameters)

        # testing setter
        model.bounding_box = (-5, 5)
        model.bounding_box = None
        MESSAGE = r"No bounding box is defined for this model .*"
        with pytest.raises(NotImplementedError, match=MESSAGE):
            model.bounding_box

        del model.bounding_box

        # test exception if dimensions don't match
        MESSAGE = r"An interval must be some sort of sequence of length 2"
        with pytest.raises(ValueError, match=MESSAGE):
            model.bounding_box = 5

        try:
            bbox = model.bounding_box.bounding_box()
        except NotImplementedError:
            return

        ddx = 0.01
        x1 = np.arange(bbox[0], bbox[1], ddx)
        x2 = np.concatenate(
            (
                [bbox[0] - idx * ddx for idx in range(10, 0, -1)],
                x1,
                [bbox[1] + idx * ddx for idx in range(1, 10)],
            )
        )

        inside_bbox = model(x1)
        outside_bbox = model(x2, with_bounding_box=True)
        outside_bbox = outside_bbox[~np.isnan(outside_bbox)]

        assert np.all(inside_bbox == outside_bbox)

    def test_bounding_box1D_peak(self, model_class, test_parameters):
        if not test_parameters.pop("bbox_peak", False):
            return

        model = create_model(model_class, test_parameters)
        bbox = model.bounding_box

        if isinstance(model, models.Lorentz1D) or isinstance(model, models.Drude1D):
            rtol = 0.01  # 1% agreement is enough due to very extended wings
            ddx = 0.1  # Finer sampling to "integrate" flux for narrow peak
        else:
            rtol = 1e-7
            ddx = 1

        if isinstance(bbox, ModelBoundingBox):
            bbox = bbox.bounding_box()

        dx = np.diff(bbox) / 2
        x1 = np.mgrid[slice(bbox[0], bbox[1] + 1, ddx)]
        x2 = np.mgrid[slice(bbox[0] - dx, bbox[1] + dx + 1, ddx)]
        arr = model(x2)
        sub_arr = model(x1)

        # check for flux agreement
        assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * rtol

    @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
    @pytest.mark.parametrize("fitter", fitters)
    def test_fitter1D(self, model_class, test_parameters, fitter):
        """
        Test if the parametric model works with the fitter.
        """
        SCIPY_LT_1_6 = not minversion("scipy", "1.6")
        if (
            model_class == models.BrokenPowerLaw1D
            and fitter == fitting.TRFLSQFitter
            and SCIPY_LT_1_6
        ):
            pytest.xfail(reason="TRF fitter fails for BrokenPowerLaw1D in scipy < 1.6")

        fitter = fitter()

        x_lim = test_parameters["x_lim"]
        parameters = test_parameters["parameters"]
        model = create_model(model_class, test_parameters)

        if isinstance(parameters, dict):
            parameters = [parameters[name] for name in model.param_names]

        if "log_fit" in test_parameters:
            if test_parameters["log_fit"]:
                x = np.logspace(x_lim[0], x_lim[1], self.N)
        else:
            x = np.linspace(x_lim[0], x_lim[1], self.N)

        np.random.seed(0)
        # add 1% relative noise to the data
        relative_noise_amplitude = 0.01
        data = (1 + relative_noise_amplitude * np.random.randn(len(x))) * model(x)
        new_model = fitter(model, x, data)

        # Only check parameters that were free in the fit
        params = [getattr(new_model, name) for name in new_model.param_names]
        fixed = [param.fixed for param in params]
        expected = np.array([val for val, fixed in zip(parameters, fixed) if not fixed])
        fitted = np.array([param.value for param in params if not param.fixed])
        assert_allclose(fitted, expected, atol=self.fit_error)

    @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
    @pytest.mark.filterwarnings(r"ignore:.*:RuntimeWarning")
    @pytest.mark.parametrize("fitter", fitters)
    def test_deriv_1D(self, model_class, test_parameters, fitter):
        """
        Test the derivative of a model by comparing results with an estimated
        derivative.
        """
        fitter = fitter()

        if model_class in self._non_finite_models:
            return

        x_lim = test_parameters["x_lim"]

        if model_class.fit_deriv is None or issubclass(model_class, PolynomialBase):
            return

        if "log_fit" in test_parameters:
            if test_parameters["log_fit"]:
                x = np.logspace(x_lim[0], x_lim[1], self.N)
        else:
            x = np.linspace(x_lim[0], x_lim[1], self.N)

        parameters = test_parameters["parameters"]
        model_with_deriv = create_model(
            model_class, test_parameters, use_constraints=False
        )
        model_no_deriv = create_model(
            model_class, test_parameters, use_constraints=False
        )

        # NOTE: PR 10644 replaced deprecated usage of RandomState but could not
        # find a new seed that did not cause test failure, resorted to hardcoding.
# add 10% noise to the amplitude # fmt: off rsn_rand_1234567890 = np.array( [ 0.61879477, 0.59162363, 0.88868359, 0.89165480, 0.45756748, 0.77818808, 0.26706377, 0.99610621, 0.54009489, 0.53752161, 0.40099938, 0.70540579, 0.40518559, 0.94999075, 0.03075388, 0.13602495, 0.08297726, 0.42352224, 0.23449723, 0.74743526, 0.65177865, 0.68998682, 0.16413419, 0.87642114, 0.44733314, 0.57871104, 0.52377835, 0.62689056, 0.34869427, 0.26209748, 0.07498055, 0.17940570, 0.82999425, 0.98759822, 0.11326099, 0.63846415, 0.73056694, 0.88321124, 0.52721004, 0.66487673, 0.74209309, 0.94083846, 0.70123128, 0.29534353, 0.76134369, 0.77593881, 0.36985514, 0.89519067, 0.33082813, 0.86108824, 0.76897859, 0.61343376, 0.43870907, 0.91913538, 0.76958966, 0.51063556, 0.04443249, 0.57463611, 0.31382006, 0.41221713, 0.21531811, 0.03237521, 0.04166386, 0.73109303, 0.74556052, 0.64716325, 0.77575353, 0.64599254, 0.16885816, 0.48485480, 0.53844248, 0.99690349, 0.23657074, 0.04119088, 0.46501519, 0.35739006, 0.23002665, 0.53420791, 0.71639475, 0.81857486, 0.73994342, 0.07948837, 0.75688276, 0.13240193, 0.48465576, 0.20624753, 0.02298276, 0.54257873, 0.68123230, 0.35887468, 0.36296147, 0.67368397, 0.29505730, 0.66558885, 0.93652252, 0.36755130, 0.91787687, 0.75922703, 0.48668067, 0.45967890 ] ) # fmt: on n = 0.1 * parameters[0] * (rsn_rand_1234567890 - 0.5) data = model_with_deriv(x) + n fitter_with_deriv = fitter new_model_with_deriv = fitter_with_deriv(model_with_deriv, x, data) fitter_no_deriv = fitter new_model_no_deriv = fitter_no_deriv( model_no_deriv, x, data, estimate_jacobian=True ) assert_allclose( new_model_with_deriv.parameters, new_model_no_deriv.parameters, atol=0.15 ) def create_model( model_class, test_parameters, use_constraints=True, parameter_key="parameters" ): """Create instance of model class.""" constraints = {} if issubclass(model_class, PolynomialBase): return model_class(**test_parameters[parameter_key]) elif issubclass(model_class, FittableModel): if "requires_scipy" in test_parameters and not HAS_SCIPY: pytest.skip("SciPy not found") if use_constraints: if "constraints" in test_parameters: constraints = test_parameters["constraints"] return model_class(*test_parameters[parameter_key], **constraints) @pytest.mark.filterwarnings(r"ignore:Model is linear in parameters.*") @pytest.mark.filterwarnings(r"ignore:The fit may be unsuccessful.*") @pytest.mark.parametrize( ("model_class", "test_parameters"), sorted(models_1D.items(), key=lambda x: str(x[0])), ) class TestFittable1DModels(Fittable1DModelTester): pass @pytest.mark.filterwarnings(r"ignore:Model is linear in parameters.*") @pytest.mark.parametrize( ("model_class", "test_parameters"), sorted(models_2D.items(), key=lambda x: str(x[0])), ) class TestFittable2DModels(Fittable2DModelTester): pass def test_ShiftModel(): # Shift by a scalar m = models.Shift(42) assert m(0) == 42 assert_equal(m([1, 2]), [43, 44]) # Shift by a list m = models.Shift([42, 43], n_models=2) assert_equal(m(0), [42, 43]) assert_equal(m([1, 2], model_set_axis=False), [[43, 44], [44, 45]]) def test_ScaleModel(): # Scale by a scalar m = models.Scale(42) assert m(0) == 0 assert_equal(m([1, 2]), [42, 84]) # Scale by a list m = models.Scale([42, 43], n_models=2) assert_equal(m(0), [0, 0]) assert_equal(m([1, 2], model_set_axis=False), [[42, 84], [43, 86]]) def test_voigt_model(): """ Currently just tests that the model peaks at its origin. 
Regression test for https://github.com/astropy/astropy/issues/3942 """ m = models.Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9) x = np.arange(0, 10, 0.01) y = m(x) assert y[500] == y.max() # y[500] is right at the center def test_model_instance_repr(): m = models.Gaussian1D(1.5, 2.5, 3.5) assert repr(m) == "<Gaussian1D(amplitude=1.5, mean=2.5, stddev=3.5)>" @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_tabular_interp_1d(): """ Test Tabular1D model. """ points = np.arange(0, 5) values = [1.0, 10, 2, 45, -3] LookupTable = models.tabular_model(1) model = LookupTable(points=points, lookup_table=values) xnew = [0.0, 0.7, 1.4, 2.1, 3.9] ans1 = [1.0, 7.3, 6.8, 6.3, 1.8] assert_allclose(model(xnew), ans1) # Test evaluate without passing `points`. model = LookupTable(lookup_table=values) assert_allclose(model(xnew), ans1) # Test bounds error. xextrap = [0.0, 0.7, 1.4, 2.1, 3.9, 4.1] MESSAGE = r"One of the requested xi is out of bounds in dimension 0" with pytest.raises(ValueError, match=MESSAGE): model(xextrap) # test extrapolation and fill value model = LookupTable(lookup_table=values, bounds_error=False, fill_value=None) assert_allclose(model(xextrap), [1.0, 7.3, 6.8, 6.3, 1.8, -7.8]) # Test unit support xnew = xnew * u.nm ans1 = ans1 * u.nJy model = LookupTable(points=points * u.nm, lookup_table=values * u.nJy) assert_quantity_allclose(model(xnew), ans1) assert_quantity_allclose(model(xnew.to(u.nm)), ans1) assert model.bounding_box == (0 * u.nm, 4 * u.nm) # Test fill value unit conversion and unitless input on table with unit model = LookupTable( [1, 2, 3], [10, 20, 30] * u.nJy, bounds_error=False, fill_value=1e-33 * (u.W / (u.m * u.m * u.Hz)), ) assert_quantity_allclose(model(np.arange(5)), [100, 10, 20, 30, 100] * u.nJy) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_tabular_interp_2d(): table = np.array( [ [-0.04614432, -0.02512547, -0.00619557, 0.0144165, 0.0297525], [-0.04510594, -0.03183369, -0.01118008, 0.01201388, 0.02496205], [-0.05464094, -0.02804499, -0.00960086, 0.01134333, 0.02284104], [-0.04879338, -0.02539565, -0.00440462, 0.01795145, 0.02122417], [-0.03637372, -0.01630025, -0.00157902, 0.01649774, 0.01952131], ] ) points = np.arange(0, 5) points = (points, points) xnew = np.array([0.0, 0.7, 1.4, 2.1, 3.9]) LookupTable = models.tabular_model(2) model = LookupTable(points, table) znew = model(xnew, xnew) result = np.array([-0.04614432, -0.03450009, -0.02241028, -0.0069727, 0.01938675]) assert_allclose(znew, result, atol=1e-7) # test 2D arrays as input a = np.arange(12).reshape((3, 4)) y, x = np.mgrid[:3, :4] t = models.Tabular2D(lookup_table=a) r = t(y, x) assert_allclose(a, r) MESSAGE = r"Only n_models=1 is supported" with pytest.raises(NotImplementedError, match=MESSAGE): model = LookupTable(n_models=2) MESSAGE = r"Must provide a lookup table" with pytest.raises(ValueError, match=MESSAGE): model = LookupTable(points=([1.2, 2.3], [1.2, 6.7], [3, 4])) MESSAGE = r"lookup_table should be an array with 2 dimensions" with pytest.raises(ValueError, match=MESSAGE): model = LookupTable(lookup_table=[1, 2, 3]) MESSAGE = r"lookup_table should be an array with 2 dimensions" with pytest.raises(ValueError, match=MESSAGE): model = LookupTable(([1, 2], [3, 4]), [5, 6]) MESSAGE = r"points must all have the same unit" with pytest.raises(ValueError, match=MESSAGE): model = LookupTable(([1, 2] * u.m, [3, 4]), [[5, 6], [7, 8]]) MESSAGE = r"fill value is in Jy but expected to be unitless" with pytest.raises(ValueError, match=MESSAGE): model = 
LookupTable(points, table, bounds_error=False, fill_value=1 * u.Jy)

    # Test unit support
    points = points[0] * u.nm
    points = (points, points)
    xnew = xnew * u.nm
    model = LookupTable(points, table * u.nJy)
    result = result * u.nJy
    assert_quantity_allclose(model(xnew, xnew), result, atol=1e-7 * u.nJy)
    xnew = xnew.to(u.m)
    assert_quantity_allclose(model(xnew, xnew), result, atol=1e-7 * u.nJy)
    bbox = (0 * u.nm, 4 * u.nm)
    bbox = (bbox, bbox)
    assert model.bounding_box == bbox


@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_nd():
    a = np.arange(24).reshape((2, 3, 4))
    x, y, z = np.mgrid[:2, :3, :4]
    tab = models.tabular_model(3)
    t = tab(lookup_table=a)
    result = t(x, y, z)
    assert_allclose(a, result)

    MESSAGE = r"Lookup table must have at least one dimension"
    with pytest.raises(ValueError, match=MESSAGE):
        models.tabular_model(0)


def test_with_bounding_box():
    """
    Test the option to evaluate a model respecting its bounding_box.
    """
    p = models.Polynomial2D(2) & models.Polynomial2D(2)
    m = models.Mapping((0, 1, 0, 1)) | p
    with NumpyRNGContext(1234567):
        m.parameters = np.random.rand(12)

    m.bounding_box = ((3, 9), (1, 8))
    x, y = np.mgrid[:10, :10]
    a, b = m(x, y)
    aw, bw = m(x, y, with_bounding_box=True)
    ind = (~np.isnan(aw)).nonzero()
    assert_allclose(a[ind], aw[ind])
    assert_allclose(b[ind], bw[ind])

    aw, bw = m(x, y, with_bounding_box=True, fill_value=1000)
    ind = (aw != 1000).nonzero()
    assert_allclose(a[ind], aw[ind])
    assert_allclose(b[ind], bw[ind])

    # test the order of bbox is not reversed for 1D models
    p = models.Polynomial1D(1, c0=12, c1=2.3)
    p.bounding_box = (0, 5)
    assert p(1) == p(1, with_bounding_box=True)

    t3 = models.Shift(10) & models.Scale(2) & models.Shift(-1)
    t3.bounding_box = ((4.3, 6.9), (6, 15), (-1, 10))
    assert_allclose(
        t3([1, 1], [7, 7], [3, 5], with_bounding_box=True),
        [[np.nan, 11], [np.nan, 14], [np.nan, 4]],
    )

    trans3 = models.Shift(10) & models.Scale(2) & models.Shift(-1)
    trans3.bounding_box = ((4.3, 6.9), (6, 15), (-1, 10))
    assert_allclose(trans3(1, 7, 5, with_bounding_box=True), [11, 14, 4])


@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_with_bounding_box():
    points = np.arange(5)
    values = np.array([1.5, 3.4, 6.7, 7, 32])
    t = models.Tabular1D(points, values)
    result = t(1, with_bounding_box=True)

    assert result == 3.4
    assert t.inverse(result, with_bounding_box=True) == 1.0


@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_bounding_box_with_units():
    points = np.arange(5) * u.pix
    lt = np.arange(5) * u.AA
    t = models.Tabular1D(points, lt)
    result = t(1 * u.pix, with_bounding_box=True)

    assert result == 1.0 * u.AA
    assert t.inverse(result, with_bounding_box=True) == 1 * u.pix


@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular1d_inverse():
    """Test that the Tabular1D inverse is defined"""
    points = np.arange(5)
    values = np.array([1.5, 3.4, 6.7, 7, 32])
    t = models.Tabular1D(points, values)
    result = t.inverse((3.4, 6.7))
    assert_allclose(result, np.array((1.0, 2.0)))

    # Check that it works for descending values in lookup_table
    t2 = models.Tabular1D(points, values[::-1])
    assert_allclose(t2.inverse.points[0], t2.lookup_table[::-1])

    result2 = t2.inverse((7, 6.7))
    assert_allclose(result2, np.array((1.0, 2.0)))

    # Check that it errors on double-valued lookup_table
    points = np.arange(5)
    values = np.array([1.5, 3.4, 3.4, 32, 25])
    t = models.Tabular1D(points, values)
    with pytest.raises(NotImplementedError, match=r""):
        t.inverse((3.4, 7.0))

    # Check that Tabular2D.inverse raises an error
    table = np.arange(5 * 
5).reshape(5, 5) points = np.arange(0, 5) points = (points, points) t3 = models.Tabular2D(points=points, lookup_table=table) with pytest.raises(NotImplementedError, match=r""): t3.inverse((3, 3)) # Check that it uses the same kwargs as the original model points = np.arange(5) values = np.array([1.5, 3.4, 6.7, 7, 32]) t = models.Tabular1D(points, values) MESSAGE = r"One of the requested xi is out of bounds in dimension 0" with pytest.raises(ValueError, match=MESSAGE): t.inverse(100) t = models.Tabular1D(points, values, bounds_error=False, fill_value=None) result = t.inverse(100) assert_allclose(t(result), 100) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_tabular_grid_shape_mismatch_error(): points = np.arange(5) lt = np.mgrid[0:5, 0:5][0] MESSAGE = r"Expected grid points in 2 directions, got 5." with pytest.raises(ValueError, match=MESSAGE): models.Tabular2D(points, lt) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_tabular_repr(): points = np.arange(5) lt = np.arange(5) t = models.Tabular1D(points, lt) assert ( repr(t) == "<Tabular1D(points=(array([0, 1, 2, 3, 4]),), lookup_table=[0 1 2 3 4])>" ) table = np.arange(5 * 5).reshape(5, 5) points = np.arange(0, 5) points = (points, points) t = models.Tabular2D(points=points, lookup_table=table) assert ( repr(t) == "<Tabular2D(points=(array([0, 1, 2, 3, 4]), array([0, 1, 2, 3, 4])), " "lookup_table=[[ 0 1 2 3 4]\n" " [ 5 6 7 8 9]\n" " [10 11 12 13 14]\n" " [15 16 17 18 19]\n" " [20 21 22 23 24]])>" ) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_tabular_str(): points = np.arange(5) lt = np.arange(5) t = models.Tabular1D(points, lt) assert ( str(t) == "Model: Tabular1D\n" "N_inputs: 1\n" "N_outputs: 1\n" "Parameters: \n" " points: (array([0, 1, 2, 3, 4]),)\n" " lookup_table: [0 1 2 3 4]\n" " method: linear\n" " fill_value: nan\n" " bounds_error: True" ) table = np.arange(5 * 5).reshape(5, 5) points = np.arange(0, 5) points = (points, points) t = models.Tabular2D(points=points, lookup_table=table) assert ( str(t) == "Model: Tabular2D\n" "N_inputs: 2\n" "N_outputs: 1\n" "Parameters: \n" " points: (array([0, 1, 2, 3, 4]), array([0, 1, 2, 3, 4]))\n" " lookup_table: [[ 0 1 2 3 4]\n" " [ 5 6 7 8 9]\n" " [10 11 12 13 14]\n" " [15 16 17 18 19]\n" " [20 21 22 23 24]]\n" " method: linear\n" " fill_value: nan\n" " bounds_error: True" ) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_tabular_evaluate(): points = np.arange(5) lt = np.arange(5)[::-1] t = models.Tabular1D(points, lt) assert (t.evaluate([1, 2, 3]) == [3, 2, 1]).all() assert (t.evaluate(np.array([1, 2, 3]) * u.m) == [3, 2, 1]).all() t.n_outputs = 2 value = [np.array([3, 2, 1]), np.array([1, 2, 3])] with mk.patch.object( tabular_models, "interpn", autospec=True, return_value=value ) as mkInterpn: outputs = t.evaluate([1, 2, 3]) for index, output in enumerate(outputs): assert np.all(value[index] == output) assert mkInterpn.call_count == 1 @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_tabular_module_name(): """ The module name must be set manually because these classes are created dynamically. 
""" for model in [models.Tabular1D, models.Tabular2D]: assert model.__module__ == "astropy.modeling.tabular" class classmodel(FittableModel): f = Parameter(default=1) x = Parameter(default=0) y = Parameter(default=2) def __init__(self, f=f.default, x=x.default, y=y.default): super().__init__(f, x, y) def evaluate(self): pass class subclassmodel(classmodel): f = Parameter(default=3, fixed=True) x = Parameter(default=10) y = Parameter(default=12) h = Parameter(default=5) def __init__(self, f=f.default, x=x.default, y=y.default, h=h.default): super().__init__(f, x, y) def evaluate(self): pass def test_parameter_inheritance(): b = subclassmodel() assert b.param_names == ("f", "x", "y", "h") assert b.h == 5 assert b.f == 3 assert b.f.fixed == True # noqa: E712 def test_parameter_description(): model = models.Gaussian1D(1.5, 2.5, 3.5) assert model.amplitude._description == "Amplitude (peak value) of the Gaussian" assert model.mean._description == "Position of peak (Gaussian)" model = models.Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9) assert model.amplitude_L._description == "The Lorentzian amplitude" assert model.fwhm_L._description == "The Lorentzian full width at half maximum" assert model.fwhm_G._description == "The Gaussian full width at half maximum" def test_SmoothlyBrokenPowerLaw1D_validators(): MESSAGE = r"amplitude parameter must be > 0" with pytest.raises(InputParameterError, match=MESSAGE): SmoothlyBrokenPowerLaw1D(amplitude=-1) MESSAGE = r"delta parameter must be >= 0.001" with pytest.raises(InputParameterError, match=MESSAGE): SmoothlyBrokenPowerLaw1D(delta=0) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.filterwarnings(r"ignore:.*:RuntimeWarning") @pytest.mark.filterwarnings(r"ignore:The fit may be unsuccessful.*") def test_SmoothlyBrokenPowerLaw1D_fit_deriv(): x_lim = [0.01, 100] x = np.logspace(x_lim[0], x_lim[1], 100) parameters = { "parameters": [1, 10, -2, 2, 0.5], "constraints": {"fixed": {"x_break": True, "delta": True}}, } model_with_deriv = create_model( SmoothlyBrokenPowerLaw1D, parameters, use_constraints=False ) model_no_deriv = create_model( SmoothlyBrokenPowerLaw1D, parameters, use_constraints=False ) # NOTE: PR 10644 replaced deprecated usage of RandomState but could not # find a new seed that did not cause test failure, resorted to hardcoding. 
# add 10% noise to the amplitude # fmt: off rsn_rand_1234567890 = np.array( [ 0.61879477, 0.59162363, 0.88868359, 0.89165480, 0.45756748, 0.77818808, 0.26706377, 0.99610621, 0.54009489, 0.53752161, 0.40099938, 0.70540579, 0.40518559, 0.94999075, 0.03075388, 0.13602495, 0.08297726, 0.42352224, 0.23449723, 0.74743526, 0.65177865, 0.68998682, 0.16413419, 0.87642114, 0.44733314, 0.57871104, 0.52377835, 0.62689056, 0.34869427, 0.26209748, 0.07498055, 0.17940570, 0.82999425, 0.98759822, 0.11326099, 0.63846415, 0.73056694, 0.88321124, 0.52721004, 0.66487673, 0.74209309, 0.94083846, 0.70123128, 0.29534353, 0.76134369, 0.77593881, 0.36985514, 0.89519067, 0.33082813, 0.86108824, 0.76897859, 0.61343376, 0.43870907, 0.91913538, 0.76958966, 0.51063556, 0.04443249, 0.57463611, 0.31382006, 0.41221713, 0.21531811, 0.03237521, 0.04166386, 0.73109303, 0.74556052, 0.64716325, 0.77575353, 0.64599254, 0.16885816, 0.48485480, 0.53844248, 0.99690349, 0.23657074, 0.04119088, 0.46501519, 0.35739006, 0.23002665, 0.53420791, 0.71639475, 0.81857486, 0.73994342, 0.07948837, 0.75688276, 0.13240193, 0.48465576, 0.20624753, 0.02298276, 0.54257873, 0.68123230, 0.35887468, 0.36296147, 0.67368397, 0.29505730, 0.66558885, 0.93652252, 0.36755130, 0.91787687, 0.75922703, 0.48668067, 0.45967890 ] ) # fmt: on n = 0.1 * parameters["parameters"][0] * (rsn_rand_1234567890 - 0.5) data = model_with_deriv(x) + n fitter_with_deriv = fitting.LevMarLSQFitter() new_model_with_deriv = fitter_with_deriv(model_with_deriv, x, data) fitter_no_deriv = fitting.LevMarLSQFitter() new_model_no_deriv = fitter_no_deriv( model_no_deriv, x, data, estimate_jacobian=True ) assert_allclose( new_model_with_deriv.parameters, new_model_no_deriv.parameters, atol=0.5 ) class _ExtendedModelMeta(_ModelMeta): @classmethod def __prepare__(mcls, name, bases, **kwds): # this shows the parent class machinery still applies namespace = super().__prepare__(name, bases, **kwds) # the custom bit namespace.update(kwds) return namespace model = models.Gaussian1D(1.5, 2.5, 3.5) assert model.amplitude._description == "Amplitude (peak value) of the Gaussian" assert model.mean._description == "Position of peak (Gaussian)" def test_metaclass_kwargs(): """Test can pass kwargs to Models""" class ClassModel(FittableModel, flag="flag"): def evaluate(self): pass # Nothing further to test, just making the class is good enough. def test_submetaclass_kwargs(): """Test can pass kwargs to Model subclasses.""" class ClassModel(FittableModel, metaclass=_ExtendedModelMeta, flag="flag"): def evaluate(self): pass assert ClassModel.flag == "flag" class ModelDefault(Model): slope = Parameter() intercept = Parameter() _separable = False @staticmethod def evaluate(x, slope, intercept): return slope * x + intercept class ModelCustom(ModelDefault): def _calculate_separability_matrix(self): return np.array([[0]]) def test_custom_separability_matrix(): original = separability_matrix(ModelDefault(slope=1, intercept=2)) assert original.all() custom = separability_matrix(ModelCustom(slope=1, intercept=2)) assert not custom.any()
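

# ---------------------------------------------------------------------------
# A minimal sketch (not part of the astropy test suite) of the pattern the
# Fittable1DModelTester docstring describes: subclassing it to test a
# user-defined model.  ``MyLine`` and ``my_models_1D`` are hypothetical names
# invented only for this illustration.
from astropy.modeling.core import Fittable1DModel


class MyLine(Fittable1DModel):
    """Hypothetical user model: a straight line y = slope * x + intercept."""

    slope = Parameter(default=1)
    intercept = Parameter(default=0)

    @staticmethod
    def evaluate(x, slope, intercept):
        return slope * x + intercept


# The entry mirrors the structure expected from example_models.py:
# init parameters, reference evaluation points, and fitting limits.
my_models_1D = {
    MyLine: {
        "parameters": [2.0, 1.0],
        "x_values": [0.0, 1.0, 2.0],
        "y_values": [1.0, 3.0, 5.0],
        "x_lim": [0, 10],
    }
}


@pytest.mark.filterwarnings(r"ignore:Model is linear in parameters.*")
@pytest.mark.parametrize(
    ("model_class", "test_parameters"),
    sorted(my_models_1D.items(), key=lambda x: str(x[0])),
)
class TestMyLine(Fittable1DModelTester):
    pass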
2e1911b3af1b7c84f51947032d7f8bc315a78048e8c0e3bac49db9809e09c945
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module tests model set evaluation and fitting for some common use cases. """ import numpy as np # pylint: disable=invalid-name import pytest from numpy.testing import assert_allclose from astropy.modeling.core import Model from astropy.modeling.fitting import LinearLSQFitter from astropy.modeling.models import ( Chebyshev1D, Chebyshev2D, Hermite1D, Hermite2D, Legendre1D, Legendre2D, Linear1D, Planar2D, Polynomial1D, Polynomial2D, ) from astropy.modeling.parameters import Parameter from astropy.utils import NumpyRNGContext x = np.arange(4) xx = np.array([x, x + 10]) xxx = np.arange(24).reshape((3, 4, 2)) _RANDOM_SEED = 0x1337 class TParModel(Model): """ A toy model to test parameters machinery """ # standard_broadasting = False n_inputs = 1 outputs = ("x",) coeff = Parameter() e = Parameter() def __init__(self, coeff, e, **kwargs): super().__init__(coeff=coeff, e=e, **kwargs) @staticmethod def evaluate(x, coeff, e): return x * coeff + e @pytest.mark.parametrize( "model_class", [Polynomial1D, Chebyshev1D, Legendre1D, Hermite1D] ) def test_model1d_axis_1(model_class): """ Test that a model initialized with model_set_axis=1 can be evaluated with model_set_axis=False. """ n_models = 2 model_axis = 1 c0 = [[2, 3]] c1 = [[1, 2]] t1 = model_class(1, c0=2, c1=1) t2 = model_class(1, c0=3, c1=2) p1 = model_class(1, c0=c0, c1=c1, n_models=n_models, model_set_axis=model_axis) MESSAGE = r"For model_set_axis=1, all inputs must be at least 2-dimensional" with pytest.raises(ValueError, match=MESSAGE): p1(x) y = p1(x, model_set_axis=False) assert y.shape[model_axis] == n_models assert_allclose(y[:, 0], t1(x)) assert_allclose(y[:, 1], t2(x)) y = p1(xx, model_set_axis=False) assert y.shape[model_axis] == n_models assert_allclose(y[:, 0, :], t1(xx)) assert_allclose(y[:, 1, :], t2(xx)) y = p1(xxx, model_set_axis=False) assert y.shape[model_axis] == n_models assert_allclose(y[:, 0, :, :], t1(xxx)) assert_allclose(y[:, 1, :, :], t2(xxx)) @pytest.mark.parametrize( "model_class", [Polynomial1D, Chebyshev1D, Legendre1D, Hermite1D] ) def test_model1d_axis_2(model_class): """ Test that a model initialized with model_set_axis=2 can be evaluated with model_set_axis=False. """ p1 = model_class( 1, c0=[[[1, 2, 3]]], c1=[[[10, 20, 30]]], n_models=3, model_set_axis=2 ) t1 = model_class(1, c0=1, c1=10) t2 = model_class(1, c0=2, c1=20) t3 = model_class(1, c0=3, c1=30) MESSAGE = r"For model_set_axis=2, all inputs must be at least 3-dimensional" with pytest.raises(ValueError, match=MESSAGE): p1(x) with pytest.raises(ValueError, match=MESSAGE): p1(xx) y = p1(x, model_set_axis=False) assert y.shape == (1, 4, 3) assert_allclose(y[:, :, 0].flatten(), t1(x)) assert_allclose(y[:, :, 1].flatten(), t2(x)) assert_allclose(y[:, :, 2].flatten(), t3(x)) @pytest.mark.parametrize( "model_class", [Polynomial1D, Chebyshev1D, Legendre1D, Hermite1D] ) def test_model1d_axis_0(model_class): """ Test that a model initialized with model_set_axis=0 can be evaluated with model_set_axis=False. 
""" p1 = model_class(1, n_models=2, model_set_axis=0) p1.c0 = [2, 3] p1.c1 = [1, 2] t1 = model_class(1, c0=2, c1=1) t2 = model_class(1, c0=3, c1=2) MESSAGE = r"Input argument 'x' does not have the correct dimensions in .*" with pytest.raises(ValueError, match=MESSAGE): p1(x) y = p1(xx) assert len(y) == 2 assert_allclose(y[0], t1(xx[0])) assert_allclose(y[1], t2(xx[1])) y = p1(x, model_set_axis=False) assert len(y) == 2 assert_allclose(y[0], t1(x)) assert_allclose(y[1], t2(x)) y = p1(xx, model_set_axis=False) assert len(y) == 2 assert_allclose(y[0], t1(xx)) assert_allclose(y[1], t2(xx)) y = p1(xxx, model_set_axis=False) assert_allclose(y[0], t1(xxx)) assert_allclose(y[1], t2(xxx)) assert len(y) == 2 @pytest.mark.parametrize("model_class", [Chebyshev2D, Legendre2D, Hermite2D]) def test_model2d_axis_2(model_class): """ Test that a model initialized with model_set_axis=2 can be evaluated with model_set_axis=False. """ p2 = model_class( 1, 1, c0_0=[[[0, 1, 2]]], c0_1=[[[3, 4, 5]]], c1_0=[[[5, 6, 7]]], c1_1=[[[1, 1, 1]]], n_models=3, model_set_axis=2, ) t1 = model_class(1, 1, c0_0=0, c0_1=3, c1_0=5, c1_1=1) t2 = model_class(1, 1, c0_0=1, c0_1=4, c1_0=6, c1_1=1) t3 = model_class(1, 1, c0_0=2, c0_1=5, c1_0=7, c1_1=1) assert p2.c0_0.shape == (1, 1, 3) y = p2(x, x, model_set_axis=False) assert y.shape == (1, 4, 3) # These are columns along the 2nd axis. assert_allclose(y[:, :, 0].flatten(), t1(x, x)) assert_allclose(y[:, :, 1].flatten(), t2(x, x)) assert_allclose(y[:, :, 2].flatten(), t3(x, x)) def test_negative_axis(): p1 = Polynomial1D(1, c0=[1, 2], c1=[3, 4], n_models=2, model_set_axis=-1) t1 = Polynomial1D(1, c0=1, c1=3) t2 = Polynomial1D(1, c0=2, c1=4) MESSAGE = r"Input argument 'x' does not have the correct dimensions in .*" with pytest.raises(ValueError, match=MESSAGE): p1(x) with pytest.raises(ValueError, match=MESSAGE): p1(xx) xxt = xx.T y = p1(xxt) assert_allclose(y[:, 0], t1(xxt[:, 0])) assert_allclose(y[:, 1], t2(xxt[:, 1])) def test_shapes(): p2 = Polynomial1D(1, n_models=3, model_set_axis=2) assert p2.c0.shape == (1, 1, 3) assert p2.c1.shape == (1, 1, 3) p1 = Polynomial1D(1, n_models=2, model_set_axis=1) assert p1.c0.shape == (1, 2) assert p1.c1.shape == (1, 2) p1 = Polynomial1D(1, c0=[1, 2], c1=[3, 4], n_models=2, model_set_axis=-1) assert p1.c0.shape == (2,) assert p1.c1.shape == (2,) e1 = [1, 2] e2 = [3, 4] a1 = np.array([[10, 20], [30, 40]]) a2 = np.array([[50, 60], [70, 80]]) t = TParModel([a1, a2], [e1, e2], n_models=2, model_set_axis=-1) assert t.coeff.shape == (2, 2, 2) assert t.e.shape == (2, 2) t = TParModel([[a1, a2]], [[e1, e2]], n_models=2, model_set_axis=1) assert t.coeff.shape == (1, 2, 2, 2) assert t.e.shape == (1, 2, 2) t = TParModel([a1, a2], [e1, e2], n_models=2, model_set_axis=0) assert t.coeff.shape == (2, 2, 2) assert t.e.shape == (2, 2) t = TParModel([a1, a2], e=[1, 2], n_models=2, model_set_axis=0) assert t.coeff.shape == (2, 2, 2) assert t.e.shape == (2,) def test_eval(): """Tests evaluation of Linear1D and Planar2D with different model_set_axis.""" model = Linear1D(slope=[1, 2], intercept=[3, 4], n_models=2) p = Polynomial1D(1, c0=[3, 4], c1=[1, 2], n_models=2) assert_allclose(model(xx), p(xx)) assert_allclose(model(x, model_set_axis=False), p(x, model_set_axis=False)) MESSAGE = r"Input argument 'x' does not have the correct dimensions in .*" with pytest.raises(ValueError, match=MESSAGE): model(x) model = Linear1D(slope=[[1, 2]], intercept=[[3, 4]], n_models=2, model_set_axis=1) p = Polynomial1D(1, c0=[[3, 4]], c1=[[1, 2]], n_models=2, model_set_axis=1) 
assert_allclose(model(xx.T), p(xx.T)) assert_allclose(model(x, model_set_axis=False), p(x, model_set_axis=False)) with pytest.raises(ValueError, match=MESSAGE): model(xx) model = Planar2D(slope_x=[1, 2], slope_y=[1, 2], intercept=[3, 4], n_models=2) y = model(xx, xx) assert y.shape == (2, 4) MESSAGE = r"Missing input arguments - expected 2, got 1" with pytest.raises(ValueError, match=MESSAGE): model(x) # Test fitting @pytest.mark.parametrize( "model_class", [Polynomial1D, Chebyshev1D, Legendre1D, Hermite1D] ) def test_linearlsqfitter(model_class): """ Issue #7159 """ p = model_class(1, n_models=2, model_set_axis=1) # Generate data for fitting 2 models and re-stack them along the last axis: y = np.array([2 * x + 1, x + 4]) y = np.rollaxis(y, 0, -1).T f = LinearLSQFitter() # This seems to fit the model_set correctly: fit = f(p, x, y) model_y = fit(x, model_set_axis=False) m1 = model_class(1, c0=fit.c0[0][0], c1=fit.c1[0][0], domain=fit.domain) m2 = model_class(1, c0=fit.c0[0][1], c1=fit.c1[0][1], domain=fit.domain) assert_allclose(model_y[:, 0], m1(x)) assert_allclose(model_y[:, 1], m2(x)) p = model_class(1, n_models=2, model_set_axis=0) fit = f(p, x, y.T) def test_model_set_axis_outputs(): fitter = LinearLSQFitter() model_set = Polynomial2D(1, n_models=2, model_set_axis=2) y2, x2 = np.mgrid[:5, :5] # z = np.moveaxis([x2 + y2, 1 - 0.1 * x2 + 0.2 * y2]), 0, 2) z = np.rollaxis(np.array([x2 + y2, 1 - 0.1 * x2 + 0.2 * y2]), 0, 3) model = fitter(model_set, x2, y2, z) res = model(x2, y2, model_set_axis=False) assert z.shape == res.shape # Test initializing with integer model_set_axis # and evaluating with a different model_set_axis model_set = Polynomial1D(1, c0=[1, 2], c1=[2, 3], n_models=2, model_set_axis=0) y0 = model_set(xx) y1 = model_set(xx.T, model_set_axis=1) assert_allclose(y0[0], y1[:, 0]) assert_allclose(y0[1], y1[:, 1]) model_set = Polynomial1D(1, c0=[[1, 2]], c1=[[2, 3]], n_models=2, model_set_axis=1) y0 = model_set(xx.T) y1 = model_set(xx, model_set_axis=0) assert_allclose(y0[:, 0], y1[0]) assert_allclose(y0[:, 1], y1[1]) MESSAGE = r"For model_set_axis=1, all inputs must be at least 2-dimensional" with pytest.raises(ValueError, match=MESSAGE): model_set(x) def test_fitting_shapes(): """Test fitting model sets of Linear1D and Planar2D.""" fitter = LinearLSQFitter() model = Linear1D(slope=[1, 2], intercept=[3, 4], n_models=2) y = model(xx) fitter(model, x, y) model = Linear1D(slope=[[1, 2]], intercept=[[3, 4]], n_models=2, model_set_axis=1) fitter(model, x, y.T) model = Planar2D(slope_x=[1, 2], slope_y=[1, 2], intercept=[3, 4], n_models=2) y = model(xx, xx) fitter(model, x, x, y) def test_compound_model_sets(): MESSAGE = r"model_set_axis must be False or 0 and consistent for operands" with pytest.raises(ValueError, match=MESSAGE): ( Polynomial1D(1, n_models=2, model_set_axis=1) | Polynomial1D(1, n_models=2, model_set_axis=0) ) def test_linear_fit_model_set_errors(): init_model = Polynomial1D(degree=2, c0=[1, 1], n_models=2) x = np.arange(10) y = init_model(x, model_set_axis=False) fitter = LinearLSQFitter() MESSAGE = r"x and y should have the same shape" with pytest.raises(ValueError, match=MESSAGE): fitter(init_model, x[:5], y) with pytest.raises(ValueError, match=MESSAGE): fitter(init_model, x, y[:, :5]) def test_linear_fit_model_set_common_weight(): """Tests fitting multiple models simultaneously.""" init_model = Polynomial1D(degree=2, c0=[1, 1], n_models=2) x = np.arange(10) y_expected = init_model(x, model_set_axis=False) assert y_expected.shape == (2, 10) # Add a bit of random 
noise with NumpyRNGContext(_RANDOM_SEED): y = y_expected + np.random.normal(0, 0.01, size=y_expected.shape) fitter = LinearLSQFitter() weights = np.ones(10) weights[[0, -1]] = 0 fitted_model = fitter(init_model, x, y, weights=weights) assert_allclose(fitted_model(x, model_set_axis=False), y_expected, rtol=1e-1) # Check that using null weights raises an error # ValueError: On entry to DLASCL parameter number 4 had an illegal value with pytest.raises(ValueError, match=r"Found NaNs in the coefficient matrix"): with pytest.warns( RuntimeWarning, match=r"invalid value encountered in.*divide" ): fitted_model = fitter(init_model, x, y, weights=np.zeros(10)) def test_linear_fit_model_set_weights(): """Tests fitting multiple models simultaneously.""" init_model = Polynomial1D(degree=2, c0=[1, 1], n_models=2) x = np.arange(10) y_expected = init_model(x, model_set_axis=False) assert y_expected.shape == (2, 10) # Add a bit of random noise with NumpyRNGContext(_RANDOM_SEED): y = y_expected + np.random.normal(0, 0.01, size=y_expected.shape) weights = np.ones_like(y) # Put a null weight for the min and max values weights[[0, 1], weights.argmin(axis=1)] = 0 weights[[0, 1], weights.argmax(axis=1)] = 0 fitter = LinearLSQFitter() fitted_model = fitter(init_model, x, y, weights=weights) assert_allclose(fitted_model(x, model_set_axis=False), y_expected, rtol=1e-1) # Check that using null weights raises an error weights[0] = 0 with pytest.raises(ValueError, match=r"Found NaNs in the coefficient matrix"): with pytest.warns( RuntimeWarning, match=r"invalid value encountered in.*divide" ): fitted_model = fitter(init_model, x, y, weights=weights) # Now we mask the values where weight is 0 with pytest.warns(RuntimeWarning, match=r"invalid value encountered in.*divide"): fitted_model = fitter( init_model, x, np.ma.array(y, mask=np.isclose(weights, 0)), weights=weights ) # Parameters for the first model are all NaNs assert np.all(np.isnan(fitted_model.param_sets[:, 0])) assert np.all(np.isnan(fitted_model(x, model_set_axis=False)[0])) # Second model is fitted correctly assert_allclose(fitted_model(x, model_set_axis=False)[1], y_expected[1], rtol=1e-1) def test_linear_fit_2d_model_set_errors(): init_model = Polynomial2D(degree=2, c0_0=[1, 1], n_models=2) x = np.arange(10) y = np.arange(10) z = init_model(x, y, model_set_axis=False) fitter = LinearLSQFitter() MESSAGE = r"x, y and z should have the same shape" with pytest.raises(ValueError, match=MESSAGE): fitter(init_model, x[:5], y, z) with pytest.raises(ValueError, match=MESSAGE): fitter(init_model, x, y, z[:, :5]) def test_linear_fit_2d_model_set_common_weight(): init_model = Polynomial2D( degree=2, c1_0=[1, 2], c0_1=[-0.5, 1], n_models=2, fixed={"c1_0": True, "c0_1": True}, ) x, y = np.mgrid[0:5, 0:5] zz = np.array([1 + x - 0.5 * y + 0.1 * x * x, 2 * x + y - 0.2 * y * y]) fitter = LinearLSQFitter() fitted_model = fitter(init_model, x, y, zz, weights=np.ones((5, 5))) assert_allclose(fitted_model(x, y, model_set_axis=False), zz, atol=1e-14) def test_linear_fit_flat_2d_model_set_common_weight(): init_model = Polynomial2D( degree=2, c1_0=[1, 2], c0_1=[-0.5, 1], n_models=2, fixed={"c1_0": True, "c0_1": True}, ) x, y = np.mgrid[0:5, 0:5] x, y = x.flatten(), y.flatten() zz = np.array([1 + x - 0.5 * y + 0.1 * x * x, 2 * x + y - 0.2 * y * y]) weights = np.ones(25) fitter = LinearLSQFitter() fitted_model = fitter(init_model, x, y, zz, weights=weights) assert_allclose(fitted_model(x, y, model_set_axis=False), zz, atol=1e-14) def test_linear_fit_2d_model_set_weights(): 
init_model = Polynomial2D( degree=2, c1_0=[1, 2], c0_1=[-0.5, 1], n_models=2, fixed={"c1_0": True, "c0_1": True}, ) x, y = np.mgrid[0:5, 0:5] zz = np.array([1 + x - 0.5 * y + 0.1 * x * x, 2 * x + y - 0.2 * y * y]) fitter = LinearLSQFitter() weights = [np.ones((5, 5)), np.ones((5, 5))] fitted_model = fitter(init_model, x, y, zz, weights=weights) assert_allclose(fitted_model(x, y, model_set_axis=False), zz, atol=1e-14) def test_linear_fit_flat_2d_model_set_weights(): init_model = Polynomial2D( degree=2, c1_0=[1, 2], c0_1=[-0.5, 1], n_models=2, fixed={"c1_0": True, "c0_1": True}, ) x, y = np.mgrid[0:5, 0:5] x, y = x.flatten(), y.flatten() zz = np.array([1 + x - 0.5 * y + 0.1 * x * x, 2 * x + y - 0.2 * y * y]) weights = np.ones((2, 25)) fitter = LinearLSQFitter() fitted_model = fitter(init_model, x, y, zz, weights=weights) assert_allclose(fitted_model(x, y, model_set_axis=False), zz, atol=1e-14) class Test1ModelSet: """ Check that fitting a single model works with a length-1 model set axis. It's not clear that this was originally intended usage, but it can be convenient, eg. when fitting a range of image rows that may be a single row, and some existing scripts might rely on it working. Currently this does not work with FittingWithOutlierRemoval. """ def setup_class(self): self.x1 = np.arange(0, 10) self.y1 = np.array([0.5 + 2.5 * self.x1]) self.w1 = np.ones((10,)) self.y1[0, 8] = 100.0 self.w1[8] = 0.0 self.y2, self.x2 = np.mgrid[0:10, 0:10] self.z2 = np.array([1 - 0.1 * self.x2 + 0.2 * self.y2]) self.w2 = np.ones((10, 10)) self.z2[0, 1, 2] = 100.0 self.w2[1, 2] = 0.0 def test_linear_1d_common_weights(self): model = Polynomial1D(1) fitter = LinearLSQFitter() model = fitter(model, self.x1, self.y1, weights=self.w1) assert_allclose(model.c0, 0.5, atol=1e-12) assert_allclose(model.c1, 2.5, atol=1e-12) def test_linear_1d_separate_weights(self): model = Polynomial1D(1) fitter = LinearLSQFitter() model = fitter(model, self.x1, self.y1, weights=self.w1[np.newaxis, ...]) assert_allclose(model.c0, 0.5, atol=1e-12) assert_allclose(model.c1, 2.5, atol=1e-12) def test_linear_1d_separate_weights_axis_1(self): model = Polynomial1D(1, model_set_axis=1) fitter = LinearLSQFitter() model = fitter(model, self.x1, self.y1.T, weights=self.w1[..., np.newaxis]) assert_allclose(model.c0, 0.5, atol=1e-12) assert_allclose(model.c1, 2.5, atol=1e-12) def test_linear_2d_common_weights(self): model = Polynomial2D(1) fitter = LinearLSQFitter() model = fitter(model, self.x2, self.y2, self.z2, weights=self.w2) assert_allclose(model.c0_0, 1.0, atol=1e-12) assert_allclose(model.c1_0, -0.1, atol=1e-12) assert_allclose(model.c0_1, 0.2, atol=1e-12) def test_linear_2d_separate_weights(self): model = Polynomial2D(1) fitter = LinearLSQFitter() model = fitter( model, self.x2, self.y2, self.z2, weights=self.w2[np.newaxis, ...] ) assert_allclose(model.c0_0, 1.0, atol=1e-12) assert_allclose(model.c1_0, -0.1, atol=1e-12) assert_allclose(model.c0_1, 0.2, atol=1e-12) def test_linear_2d_separate_weights_axis_2(self): model = Polynomial2D(1, model_set_axis=2) fitter = LinearLSQFitter() model = fitter( model, self.x2, self.y2, np.rollaxis(self.z2, 0, 3), weights=self.w2[..., np.newaxis], ) assert_allclose(model.c0_0, 1.0, atol=1e-12) assert_allclose(model.c1_0, -0.1, atol=1e-12) assert_allclose(model.c0_1, 0.2, atol=1e-12)
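

# ---------------------------------------------------------------------------
# A small illustrative sketch (not part of the astropy test suite) that
# summarizes the model_set_axis semantics exercised above: for a set of N
# models, model_set_axis picks the input axis whose N slices are routed to
# the individual models, while model_set_axis=False broadcasts every model
# over the full input.
def test_model_set_axis_semantics_sketch():
    p = Polynomial1D(1, c0=[0, 0], c1=[1, 10], n_models=2, model_set_axis=0)
    data = np.array([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]])

    # Axis 0 indexes the models: row 0 goes through y = x, row 1 through y = 10 * x.
    assert_allclose(p(data), [[1.0, 2.0, 3.0], [10.0, 20.0, 30.0]])

    # model_set_axis=False evaluates both models on the same 1D input.
    y = p([1.0, 2.0, 3.0], model_set_axis=False)
    assert_allclose(y, [[1.0, 2.0, 3.0], [10.0, 20.0, 30.0]])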
55ddc2d714f91ca6a18e3348603645bb86ecdfb36e29b6511345274588452db3
# Licensed under a 3-clause BSD style license - see LICENSE.rst # pylint: disable=invalid-name, no-member import numpy as np import pytest from numpy.testing import assert_allclose from astropy import units as u from astropy.modeling import models from astropy.tests.helper import assert_quantity_allclose from astropy.wcs import wcs @pytest.mark.parametrize( "inp", [(0, 0), (4000, -20.56), (-2001.5, 45.9), (0, 90), (0, -90), (np.mgrid[:4, :6])], ) def test_against_wcslib(inp): w = wcs.WCS() crval = [202.4823228, 47.17511893] w.wcs.crval = crval w.wcs.ctype = ["RA---TAN", "DEC--TAN"] lonpole = 180 tan = models.Pix2Sky_TAN() n2c = models.RotateNative2Celestial( crval[0] * u.deg, crval[1] * u.deg, lonpole * u.deg ) c2n = models.RotateCelestial2Native( crval[0] * u.deg, crval[1] * u.deg, lonpole * u.deg ) m = tan | n2c minv = c2n | tan.inverse radec = w.wcs_pix2world(inp[0], inp[1], 1) xy = w.wcs_world2pix(radec[0], radec[1], 1) assert_allclose(m(*inp), radec, atol=1e-12) assert_allclose(minv(*radec), xy, atol=1e-12) @pytest.mark.parametrize( "inp", [(40 * u.deg, -0.057 * u.rad), (21.5 * u.arcsec, 45.9 * u.deg)] ) def test_roundtrip_sky_rotation(inp): lon, lat, lon_pole = 42 * u.deg, (43 * u.deg).to(u.arcsec), (44 * u.deg).to(u.rad) n2c = models.RotateNative2Celestial(lon, lat, lon_pole) c2n = models.RotateCelestial2Native(lon, lat, lon_pole) assert_quantity_allclose(n2c.inverse(*n2c(*inp)), inp, atol=1e-13 * u.deg) assert_quantity_allclose(c2n.inverse(*c2n(*inp)), inp, atol=1e-13 * u.deg) def test_Rotation2D(): model = models.Rotation2D(angle=90 * u.deg) a, b = 1 * u.deg, 0 * u.deg x, y = model(a, b) assert_quantity_allclose([x, y], [0 * u.deg, 1 * u.deg], atol=1e-10 * u.deg) def test_Rotation2D_inverse(): model = models.Rotation2D(angle=234.23494 * u.deg) x, y = model.inverse(*model(1 * u.deg, 0 * u.deg)) assert_quantity_allclose([x, y], [1 * u.deg, 0 * u.deg], atol=1e-10 * u.deg) def test_euler_angle_rotations(): ydeg = (90 * u.deg, 0 * u.deg) y = (90, 0) z = (0, 90) # rotate y into minus z model = models.EulerAngleRotation(0 * u.rad, np.pi / 2 * u.rad, 0 * u.rad, "zxz") assert_allclose(model(*z), y, atol=10**-12) model = models.EulerAngleRotation(0 * u.deg, 90 * u.deg, 0 * u.deg, "zxz") assert_quantity_allclose(model(*(z * u.deg)), ydeg, atol=10**-12 * u.deg) @pytest.mark.parametrize( "params", [ (60, 10, 25), (60 * u.deg, 10 * u.deg, 25 * u.deg), ((60 * u.deg).to(u.rad), (10 * u.deg).to(u.rad), (25 * u.deg).to(u.rad)), ], ) def test_euler_rotations_with_units(params): x = 1 * u.deg y = 1 * u.deg phi, theta, psi = params urot = models.EulerAngleRotation(phi, theta, psi, axes_order="xyz") a, b = urot(x.value, y.value) assert_allclose((a, b), (-23.614457631192547, 9.631254579686113)) a, b = urot(x, y) assert_quantity_allclose( (a, b), (-23.614457631192547 * u.deg, 9.631254579686113 * u.deg) ) a, b = urot(x.to(u.rad), y.to(u.rad)) assert_quantity_allclose( (a, b), (-23.614457631192547 * u.deg, 9.631254579686113 * u.deg) ) def test_attributes(): n2c = models.RotateNative2Celestial(20016 * u.arcsec, -72.3 * u.deg, np.pi * u.rad) assert_allclose(n2c.lat.value, -72.3) assert_allclose(n2c.lat._raw_value, -1.2618730491919001) assert_allclose(n2c.lon.value, 20016) assert_allclose(n2c.lon._raw_value, 0.09704030641088472) assert_allclose(n2c.lon_pole.value, np.pi) assert_allclose(n2c.lon_pole._raw_value, np.pi) assert n2c.lon.unit is u.Unit("arcsec") assert n2c.lon.internal_unit is u.Unit("rad") assert n2c.lat.unit is u.Unit("deg") assert n2c.lat.internal_unit is u.Unit("rad") assert n2c.lon_pole.unit 
is u.Unit("rad") assert n2c.lon_pole.internal_unit is u.Unit("rad")
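

# ---------------------------------------------------------------------------
# An illustrative sketch (not part of the astropy test suite): planar
# rotation angles compose additively, so chaining two Rotation2D instances
# should match a single rotation by the summed angle regardless of the
# angular units used.
def test_rotation2d_composition_sketch():
    r30 = models.Rotation2D(angle=30 * u.deg)
    r60 = models.Rotation2D(angle=60 * u.deg)
    combined = r30 | r60
    single = models.Rotation2D(angle=90 * u.deg)
    x, y = combined(1 * u.deg, 0 * u.deg)
    xs, ys = single(1 * u.deg, 0 * u.deg)
    assert_quantity_allclose([x, y], [xs, ys], atol=1e-10 * u.deg)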
60bb7196a87dd85374eab6fb76079cc3192cb9f70525a4aa0f3b3a78a957aa7e
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Tests that relate to evaluating models with quantity parameters """ import numpy as np import pytest from numpy.testing import assert_allclose from astropy import units as u from astropy.modeling.core import Model from astropy.modeling.models import Gaussian1D, Pix2Sky_TAN, Scale, Shift from astropy.tests.helper import assert_quantity_allclose from astropy.units import UnitsError # We start off by taking some simple cases where the units are defined by # whatever the model is initialized with, and we check that the model evaluation # returns quantities. MESSAGE = ( "{}: Units of input 'x', {}.*, could not be converted to required input units of" " {}.*" ) def test_evaluate_with_quantities(): """ Test evaluation of a single model with Quantity parameters that do not explicitly require units. """ # We create two models here - one with quantities, and one without. The one # without is used to create the reference values for comparison. g = Gaussian1D(1, 1, 0.1) gq = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m) # We first check that calling the Gaussian with quantities returns the # expected result assert_quantity_allclose(gq(1 * u.m), g(1) * u.J) # Units have to be specified for the Gaussian with quantities - if not, an # error is raised with pytest.raises(UnitsError, match=MESSAGE.format("Gaussian1D", "", "m ")): gq(1) # However, zero is a special case assert_quantity_allclose(gq(0), g(0) * u.J) # We can also evaluate models with equivalent units assert_allclose(gq(0.0005 * u.km).value, g(0.5)) # But not with incompatible units with pytest.raises(UnitsError, match=MESSAGE.format("Gaussian1D", "s", "m")): gq(3 * u.s) # We also can't evaluate the model without quantities with a quantity with pytest.raises( UnitsError, match=r"Can only apply 'subtract' function to dimensionless quantities .*", ): g(3 * u.m) # TODO: determine what error message should be here # assert exc.value.args[0] == ("Units of input 'x', m (length), could not be " # "converted to required dimensionless input") def test_evaluate_with_quantities_and_equivalencies(): """ We now make sure that equivalencies are correctly taken into account """ g = Gaussian1D(1 * u.Jy, 10 * u.nm, 2 * u.nm) # We aren't setting the equivalencies, so this won't work with pytest.raises(UnitsError, match=MESSAGE.format("Gaussian1D", "PHz", "nm")): g(30 * u.PHz) # But it should now work if we pass equivalencies when evaluating assert_quantity_allclose( g(30 * u.PHz, equivalencies={"x": u.spectral()}), g(9.993081933333332 * u.nm) ) class MyTestModel(Model): n_inputs = 2 n_outputs = 1 def evaluate(self, a, b): print("a", a) print("b", b) return a * b class TestInputUnits: def setup_method(self, method): self.model = MyTestModel() def test_evaluate(self): # We should be able to evaluate with anything assert_quantity_allclose(self.model(3, 5), 15) assert_quantity_allclose(self.model(4 * u.m, 5), 20 * u.m) assert_quantity_allclose(self.model(3 * u.deg, 5), 15 * u.deg) def test_input_units(self): self.model._input_units = {"x": u.deg} assert_quantity_allclose(self.model(3 * u.deg, 4), 12 * u.deg) assert_quantity_allclose(self.model(4 * u.rad, 2), 8 * u.rad) assert_quantity_allclose(self.model(4 * u.rad, 2 * u.s), 8 * u.rad * u.s) with pytest.raises(UnitsError, match=MESSAGE.format("MyTestModel", "s", "deg")): self.model(4 * u.s, 3) with pytest.raises(UnitsError, match=MESSAGE.format("MyTestModel", "", "deg")): self.model(3, 3) def test_input_units_allow_dimensionless(self): 
self.model._input_units = {"x": u.deg} self.model._input_units_allow_dimensionless = True assert_quantity_allclose(self.model(3 * u.deg, 4), 12 * u.deg) assert_quantity_allclose(self.model(4 * u.rad, 2), 8 * u.rad) with pytest.raises(UnitsError, match=MESSAGE.format("MyTestModel", "s", "deg")): self.model(4 * u.s, 3) assert_quantity_allclose(self.model(3, 3), 9) def test_input_units_strict(self): self.model._input_units = {"x": u.deg} self.model._input_units_strict = True assert_quantity_allclose(self.model(3 * u.deg, 4), 12 * u.deg) result = self.model(np.pi * u.rad, 2) assert_quantity_allclose(result, 360 * u.deg) assert result.unit is u.deg def test_input_units_equivalencies(self): self.model._input_units = {"x": u.micron} with pytest.raises( UnitsError, match=MESSAGE.format("MyTestModel", "PHz", "micron") ): self.model(3 * u.PHz, 3) self.model.input_units_equivalencies = {"x": u.spectral()} assert_quantity_allclose( self.model(3 * u.PHz, 3), 3 * (3 * u.PHz).to(u.micron, equivalencies=u.spectral()), ) def test_return_units(self): self.model._input_units = {"z": u.deg} self.model._return_units = {"z": u.rad} result = self.model(3 * u.deg, 4) assert_quantity_allclose(result, 12 * u.deg) assert result.unit is u.rad def test_return_units_scalar(self): # Check that return_units also works when giving a single unit since # there is only one output, so is unambiguous. self.model._input_units = {"x": u.deg} self.model._return_units = u.rad result = self.model(3 * u.deg, 4) assert_quantity_allclose(result, 12 * u.deg) assert result.unit is u.rad def test_and_input_units(): """ Test units to first model in chain. """ s1 = Shift(10 * u.deg) s2 = Shift(10 * u.deg) cs = s1 & s2 out = cs(10 * u.arcsecond, 20 * u.arcsecond) assert_quantity_allclose(out[0], 10 * u.deg + 10 * u.arcsec) assert_quantity_allclose(out[1], 10 * u.deg + 20 * u.arcsec) def test_plus_input_units(): """ Test units to first model in chain. """ s1 = Shift(10 * u.deg) s2 = Shift(10 * u.deg) cs = s1 + s2 out = cs(10 * u.arcsecond) assert_quantity_allclose(out, 20 * u.deg + 20 * u.arcsec) def test_compound_input_units(): """ Test units to first model in chain. """ s1 = Shift(10 * u.deg) s2 = Shift(10 * u.deg) cs = s1 | s2 out = cs(10 * u.arcsecond) assert_quantity_allclose(out, 20 * u.deg + 10 * u.arcsec) def test_compound_input_units_fail(): """ Test incompatible units to first model in chain. """ s1 = Shift(10 * u.deg) s2 = Shift(10 * u.deg) cs = s1 | s2 with pytest.raises(UnitsError, match=MESSAGE.format("Shift", "pix", "deg")): cs(10 * u.pix) def test_compound_incompatible_units_fail(): """ Test incompatible model units in chain. """ s1 = Shift(10 * u.pix) s2 = Shift(10 * u.deg) cs = s1 | s2 with pytest.raises(UnitsError, match=MESSAGE.format("Shift", "pix", "deg")): cs(10 * u.pix) def test_compound_pipe_equiv_call(): """ Check that equivalencies work when passed to evaluate, for a chained model (which has one input). """ s1 = Shift(10 * u.deg) s2 = Shift(10 * u.deg) cs = s1 | s2 out = cs(10 * u.pix, equivalencies={"x": u.pixel_scale(0.5 * u.deg / u.pix)}) assert_quantity_allclose(out, 25 * u.deg) def test_compound_and_equiv_call(): """ Check that equivalencies work when passed to evaluate, for a composite model with two inputs. 
""" s1 = Shift(10 * u.deg) s2 = Shift(10 * u.deg) cs = s1 & s2 out = cs( 10 * u.pix, 10 * u.pix, equivalencies={ "x0": u.pixel_scale(0.5 * u.deg / u.pix), "x1": u.pixel_scale(0.5 * u.deg / u.pix), }, ) assert_quantity_allclose(out[0], 15 * u.deg) assert_quantity_allclose(out[1], 15 * u.deg) def test_compound_input_units_equivalencies(): """ Test setting input_units_equivalencies on one of the models. """ s1 = Shift(10 * u.deg) s1.input_units_equivalencies = {"x": u.pixel_scale(0.5 * u.deg / u.pix)} s2 = Shift(10 * u.deg) sp = Shift(10 * u.pix) cs = s1 | s2 assert cs.input_units_equivalencies == {"x": u.pixel_scale(0.5 * u.deg / u.pix)} out = cs(10 * u.pix) assert_quantity_allclose(out, 25 * u.deg) cs = sp | s1 assert cs.input_units_equivalencies is None out = cs(10 * u.pix) assert_quantity_allclose(out, 20 * u.deg) cs = s1 & s2 assert cs.input_units_equivalencies == {"x0": u.pixel_scale(0.5 * u.deg / u.pix)} cs = cs.rename("TestModel") out = cs(20 * u.pix, 10 * u.deg) assert_quantity_allclose(out, 20 * u.deg) with pytest.raises(UnitsError, match=MESSAGE.format("Shift", "pix", "deg")): out = cs(20 * u.pix, 10 * u.pix) def test_compound_input_units_strict(): """ Test setting input_units_strict on one of the models. """ class ScaleDegrees(Scale): input_units = {"x": u.deg} s1 = ScaleDegrees(2) s2 = Scale(2) cs = s1 | s2 out = cs(10 * u.arcsec) assert_quantity_allclose(out, 40 * u.arcsec) assert out.unit is u.deg # important since this tests input_units_strict cs = s2 | s1 out = cs(10 * u.arcsec) assert_quantity_allclose(out, 40 * u.arcsec) assert out.unit is u.deg # important since this tests input_units_strict cs = s1 & s2 out = cs(10 * u.arcsec, 10 * u.arcsec) assert_quantity_allclose(out, 20 * u.arcsec) assert out[0].unit is u.deg assert out[1].unit is u.arcsec def test_compound_input_units_allow_dimensionless(): """ Test setting input_units_allow_dimensionless on one of the models. """ class ScaleDegrees(Scale): input_units = {"x": u.deg} s1 = ScaleDegrees(2) s1._input_units_allow_dimensionless = True s2 = Scale(2) cs = s1 | s2 cs = cs.rename("TestModel") out = cs(10) assert_quantity_allclose(out, 40 * u.one) out = cs(10 * u.arcsec) assert_quantity_allclose(out, 40 * u.arcsec) with pytest.raises(UnitsError, match=MESSAGE.format("ScaleDegrees", "m", "deg")): out = cs(10 * u.m) s1._input_units_allow_dimensionless = False cs = s1 | s2 cs = cs.rename("TestModel") with pytest.raises(UnitsError, match=MESSAGE.format("ScaleDegrees", "", "deg")): out = cs(10) s1._input_units_allow_dimensionless = True cs = s2 | s1 cs = cs.rename("TestModel") out = cs(10) assert_quantity_allclose(out, 40 * u.one) out = cs(10 * u.arcsec) assert_quantity_allclose(out, 40 * u.arcsec) with pytest.raises(UnitsError, match=MESSAGE.format("ScaleDegrees", "m", "deg")): out = cs(10 * u.m) s1._input_units_allow_dimensionless = False cs = s2 | s1 with pytest.raises(UnitsError, match=MESSAGE.format("ScaleDegrees", "", "deg")): out = cs(10) s1._input_units_allow_dimensionless = True s1 = ScaleDegrees(2) s1._input_units_allow_dimensionless = True s2 = ScaleDegrees(2) s2._input_units_allow_dimensionless = False cs = s1 & s2 cs = cs.rename("TestModel") out = cs(10, 10 * u.arcsec) assert_quantity_allclose(out[0], 20 * u.one) assert_quantity_allclose(out[1], 20 * u.arcsec) with pytest.raises(UnitsError, match=MESSAGE.format("ScaleDegrees", "", "deg")): out = cs(10, 10) def test_compound_return_units(): """ Test that return_units on the first model in the chain is respected for the input to the second. 
""" class PassModel(Model): n_inputs = 2 n_outputs = 2 def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) @property def input_units(self): """Input units.""" return {"x0": u.deg, "x1": u.deg} @property def return_units(self): """Output units.""" return {"x0": u.deg, "x1": u.deg} def evaluate(self, x, y): return x.value, y.value cs = Pix2Sky_TAN() | PassModel() assert_quantity_allclose(cs(0 * u.deg, 0 * u.deg), (0, 90) * u.deg)
ab88de85cecbb2e7de6087c328633de2f45e4570b9d89121f19381ab311afef7
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Tests that relate to fitting models with quantity parameters """ import numpy as np import pytest from astropy import units as u from astropy.modeling import fitting, models from astropy.modeling.core import Fittable1DModel from astropy.modeling.parameters import Parameter from astropy.tests.helper import assert_quantity_allclose from astropy.units import UnitsError from astropy.utils import NumpyRNGContext from astropy.utils.compat.optional_deps import HAS_SCIPY # Fitting should be as intuitive as possible to the user. Essentially, models # and fitting should work without units, but if one has units, the other should # have units too, and the resulting fitted parameters will also have units. fitters = [ fitting.LevMarLSQFitter, fitting.TRFLSQFitter, fitting.LMLSQFitter, fitting.DogBoxLSQFitter, ] def _fake_gaussian_data(): # Generate fake data with NumpyRNGContext(12345): x = np.linspace(-5.0, 5.0, 2000) y = 3 * np.exp(-0.5 * (x - 1.3) ** 2 / 0.8**2) y += np.random.normal(0.0, 0.2, x.shape) # Attach units to data x = x * u.m y = y * u.Jy return x, y compound_models_no_units = [ models.Linear1D() + models.Gaussian1D() + models.Gaussian1D(), models.Linear1D() + models.Gaussian1D() | models.Scale(), models.Linear1D() + models.Gaussian1D() | models.Shift(), ] class CustomInputNamesModel(Fittable1DModel): n_inputs = 1 n_outputs = 1 a = Parameter(default=1.0) b = Parameter(default=1.0) def __init__(self, a=a, b=b): super().__init__(a=a, b=b) self.inputs = ("inn",) self.outputs = ("out",) @staticmethod def evaluate(inn, a, b): return a * inn + b @property def input_units(self): if self.a.unit is None and self.b.unit is None: return None else: return {"inn": self.b.unit / self.a.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {"a": outputs_unit["out"] / inputs_unit["inn"], "b": outputs_unit["out"]} def models_with_custom_names(): line = models.Linear1D(1 * u.m / u.s, 2 * u.m) line.inputs = ("inn",) line.outputs = ("out",) custom_names_model = CustomInputNamesModel(1 * u.m / u.s, 2 * u.m) return [line, custom_names_model] @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.parametrize("fitter", fitters) def test_fitting_simple(fitter): fitter = fitter() x, y = _fake_gaussian_data() # Fit the data using a Gaussian with units g_init = models.Gaussian1D() g = fitter(g_init, x, y) # TODO: update actual numerical results once implemented, but these should # be close to the values below. assert_quantity_allclose(g.amplitude, 3 * u.Jy, rtol=0.05) assert_quantity_allclose(g.mean, 1.3 * u.m, rtol=0.05) assert_quantity_allclose(g.stddev, 0.8 * u.m, rtol=0.05) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.parametrize("fitter", fitters) def test_fitting_with_initial_values(fitter): fitter = fitter() x, y = _fake_gaussian_data() # Fit the data using a Gaussian with units g_init = models.Gaussian1D(amplitude=1.0 * u.mJy, mean=3 * u.cm, stddev=2 * u.mm) g = fitter(g_init, x, y) # TODO: update actual numerical results once implemented, but these should # be close to the values below. 
assert_quantity_allclose(g.amplitude, 3 * u.Jy, rtol=0.05) assert_quantity_allclose(g.mean, 1.3 * u.m, rtol=0.05) assert_quantity_allclose(g.stddev, 0.8 * u.m, rtol=0.05) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.parametrize("fitter", fitters) def test_fitting_missing_data_units(fitter): """ Raise an error if the model has units but the data doesn't """ fitter = fitter() class UnorderedGaussian1D(models.Gaussian1D): # Parameters are ordered differently here from Gaussian1D # to ensure the order does not break functionality. def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "amplitude": outputs_unit["y"], "mean": inputs_unit["x"], "stddev": inputs_unit["x"], } g_init = UnorderedGaussian1D(amplitude=1.0 * u.mJy, mean=3 * u.cm, stddev=2 * u.mm) # We define flux unit so that conversion fails at wavelength unit. # This is because the order of parameter unit conversion seems to # follow the order defined in _parameter_units_for_data_units method. MESSAGE = r"'cm' .* and '' .* are not convertible" with pytest.raises(UnitsError, match=MESSAGE): fitter(g_init, [1, 2, 3], [4, 5, 6] * (u.erg / (u.s * u.cm * u.cm * u.Hz))) MESSAGE = r"'mJy' .* and '' .* are not convertible" with pytest.raises(UnitsError, match=MESSAGE): fitter(g_init, [1, 2, 3] * u.m, [4, 5, 6]) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.parametrize("fitter", fitters) def test_fitting_missing_model_units(fitter): """ Proceed if the data has units but the model doesn't """ fitter = fitter() x, y = _fake_gaussian_data() g_init = models.Gaussian1D(amplitude=1.0, mean=3, stddev=2) g = fitter(g_init, x, y) assert_quantity_allclose(g.amplitude, 3 * u.Jy, rtol=0.05) assert_quantity_allclose(g.mean, 1.3 * u.m, rtol=0.05) assert_quantity_allclose(g.stddev, 0.8 * u.m, rtol=0.05) g_init = models.Gaussian1D(amplitude=1.0, mean=3 * u.m, stddev=2 * u.m) g = fitter(g_init, x, y) assert_quantity_allclose(g.amplitude, 3 * u.Jy, rtol=0.05) assert_quantity_allclose(g.mean, 1.3 * u.m, rtol=0.05) assert_quantity_allclose(g.stddev, 0.8 * u.m, rtol=0.05) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.parametrize("fitter", fitters) def test_fitting_incompatible_units(fitter): """ Raise an error if the data and model have incompatible units """ fitter = fitter() g_init = models.Gaussian1D(amplitude=1.0 * u.Jy, mean=3 * u.m, stddev=2 * u.cm) MESSAGE = r"'Hz' .* and 'm' .* are not convertible" with pytest.raises(UnitsError, match=MESSAGE): fitter(g_init, [1, 2, 3] * u.Hz, [4, 5, 6] * u.Jy) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.filterwarnings(r"ignore:The fit may be unsuccessful.*") @pytest.mark.filterwarnings(r"ignore:divide by zero encountered.*") @pytest.mark.parametrize("model", compound_models_no_units) @pytest.mark.parametrize("fitter", fitters) def test_compound_without_units(model, fitter): fitter = fitter() x = np.linspace(-5, 5, 10) * u.Angstrom with NumpyRNGContext(12345): y = np.random.sample(10) res_fit = fitter(model, x, y * u.Hz) for param_name in res_fit.param_names: print(getattr(res_fit, param_name)) assert all([res_fit[i]._has_units for i in range(3)]) z = res_fit(x) assert isinstance(z, u.Quantity) res_fit = fitter(model, np.arange(10) * u.Unit("Angstrom"), y) assert all([res_fit[i]._has_units for i in range(3)]) z = res_fit(x) assert isinstance(z, np.ndarray) # FIXME: See https://github.com/astropy/astropy/issues/10675 # @pytest.mark.skipif(not HAS_SCIPY, reason='requires scipy') 
@pytest.mark.skip(reason="Flaky and ill-conditioned") @pytest.mark.parametrize("fitter", fitters) def test_compound_fitting_with_units(fitter): fitter = fitter() x = np.linspace(-5, 5, 15) * u.Angstrom y = np.linspace(-5, 5, 15) * u.Angstrom fitter = fitter() m = models.Gaussian2D( 10 * u.Hz, 3 * u.Angstrom, 4 * u.Angstrom, 1 * u.Angstrom, 2 * u.Angstrom ) p = models.Planar2D(3 * u.Hz / u.Angstrom, 4 * u.Hz / u.Angstrom, 1 * u.Hz) model = m + p z = model(x, y) res = fitter(model, x, y, z) assert isinstance(res(x, y), np.ndarray) assert all([res[i]._has_units for i in range(2)]) model = models.Gaussian2D() + models.Planar2D() res = fitter(model, x, y, z) assert isinstance(res(x, y), np.ndarray) assert all([res[i]._has_units for i in range(2)]) # A case of a mixture of models with and without units model = models.BlackBody(temperature=3000 * u.K) * models.Const1D(amplitude=1.0) x = np.linspace(1, 3, 10000) * u.micron with NumpyRNGContext(12345): n = np.random.normal(3) y = model(x) res = fitter(model, x, y * (1 + n)) # The large rtol here is due to different results on linux and macosx, likely # the model is ill-conditioned. np.testing.assert_allclose( res.parameters, [3000, 2.1433621e00, 2.647347e00], rtol=0.4 ) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.filterwarnings(r"ignore:Model is linear in parameters*") @pytest.mark.parametrize("model", models_with_custom_names()) @pytest.mark.parametrize("fitter", fitters) def test_fitting_custom_names(model, fitter): """Tests fitting of models with custom inputs and outsputs names.""" fitter = fitter() x = np.linspace(0, 10, 100) * u.s y = model(x) new_model = fitter(model, x, y) for param_name in model.param_names: assert_quantity_allclose( getattr(new_model, param_name).quantity, getattr(model, param_name).quantity )
05fa27460baf8a09bd539c4c0dd6d43e477c409c075178e1583210283bd60b52
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module tests fitting and model evaluation with various inputs """ import numpy as np import pytest from numpy.testing import assert_allclose from astropy.modeling import fitting, models from astropy.modeling.core import Fittable1DModel, FittableModel, Model from astropy.modeling.parameters import Parameter from astropy.utils.compat.optional_deps import HAS_SCIPY model1d_params = [ (models.Polynomial1D, [2]), (models.Legendre1D, [2]), (models.Chebyshev1D, [2]), (models.Shift, [2]), (models.Scale, [2]), ] model2d_params = [ (models.Polynomial2D, [2]), (models.Legendre2D, [1, 2]), (models.Chebyshev2D, [1, 2]), ] fitters = [ fitting.LevMarLSQFitter, fitting.TRFLSQFitter, fitting.LMLSQFitter, fitting.DogBoxLSQFitter, ] class TestInputType: """ This class tests that models accept numbers, lists and arrays. Add new models to one of the lists above to test for this. """ def setup_class(self): self.x = 5.3 self.y = 6.7 self.x1 = np.arange(1, 10, 0.1) self.y1 = np.arange(1, 10, 0.1) self.y2, self.x2 = np.mgrid[:10, :8] @pytest.mark.parametrize(("model", "params"), model1d_params) def test_input1D(self, model, params): m = model(*params) m(self.x) m(self.x1) m(self.x2) @pytest.mark.parametrize(("model", "params"), model2d_params) def test_input2D(self, model, params): m = model(*params) m(self.x, self.y) m(self.x1, self.y1) m(self.x2, self.y2) class TestFitting: """Test various input options to fitting routines.""" def setup_class(self): self.x1 = np.arange(10) self.y, self.x = np.mgrid[:10, :10] def test_linear_fitter_1set(self): """1 set 1D x, 1pset""" expected = np.array([0, 1, 1, 1]) p1 = models.Polynomial1D(3) p1.parameters = [0, 1, 1, 1] y1 = p1(self.x1) pfit = fitting.LinearLSQFitter() model = pfit(p1, self.x1, y1) assert_allclose(model.parameters, expected, atol=10 ** (-7)) def test_linear_fitter_Nset(self): """1 set 1D x, 2 sets 1D y, 2 param_sets""" expected = np.array([[0, 0], [1, 1], [2, 2], [3, 3]]) p1 = models.Polynomial1D(3, n_models=2) p1.parameters = [0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0] params = {} for i in range(4): params[p1.param_names[i]] = [i, i] p1 = models.Polynomial1D(3, model_set_axis=0, **params) y1 = p1(self.x1, model_set_axis=False) pfit = fitting.LinearLSQFitter() model = pfit(p1, self.x1, y1) assert_allclose(model.param_sets, expected, atol=10 ** (-7)) def test_linear_fitter_1dcheb(self): """1 pset, 1 set 1D x, 1 set 1D y, Chebyshev 1D polynomial""" expected = np.array( [ [ 2817.2499999999995, 4226.6249999999991, 1680.7500000000009, 273.37499999999926, ] ] ).T ch1 = models.Chebyshev1D(3) ch1.parameters = [0, 1, 2, 3] y1 = ch1(self.x1) pfit = fitting.LinearLSQFitter() model = pfit(ch1, self.x1, y1) assert_allclose(model.param_sets, expected, atol=10 ** (-2)) def test_linear_fitter_1dlegend(self): """ 1 pset, 1 set 1D x, 1 set 1D y, Legendre 1D polynomial """ expected = np.array( [ [ 1925.5000000000011, 3444.7500000000005, 1883.2500000000014, 364.4999999999996, ] ] ).T leg1 = models.Legendre1D(3) leg1.parameters = [1, 2, 3, 4] y1 = leg1(self.x1) pfit = fitting.LinearLSQFitter() model = pfit(leg1, self.x1, y1) assert_allclose(model.param_sets, expected, atol=10 ** (-12)) def test_linear_fitter_1set2d(self): p2 = models.Polynomial2D(2) p2.parameters = [0, 1, 2, 3, 4, 5] expected = [0, 1, 2, 3, 4, 5] z = p2(self.x, self.y) pfit = fitting.LinearLSQFitter() model = pfit(p2, self.x, self.y, z) assert_allclose(model.parameters, expected, atol=10 ** (-12)) assert_allclose(model(self.x, self.y), z, 
                        atol=10 ** (-12))

    def test_wrong_numpset(self):
        """
        A ValueError is raised if a single data set (1d x, 1d y) is fit with
        a model with multiple parameter sets.
        """
        MESSAGE = (
            r"Number of data sets .* is expected to equal the number of parameter sets"
        )
        with pytest.raises(ValueError, match=MESSAGE):
            p1 = models.Polynomial1D(5)
            y1 = p1(self.x1)
            p1 = models.Polynomial1D(5, n_models=2)
            pfit = fitting.LinearLSQFitter()
            pfit(p1, self.x1, y1)

    def test_wrong_pset(self):
        """A case of 1 set of x and multiple sets of y and parameters."""
        expected = np.array(
            [
                [1, 0],
                [1, 1],
                [1, 2],
                [1, 3],
                [1, 4],
                [1, 5],
            ]
        )
        p1 = models.Polynomial1D(5, n_models=2)
        params = {}
        for i in range(6):
            params[p1.param_names[i]] = [1, i]
        p1 = models.Polynomial1D(5, model_set_axis=0, **params)
        y1 = p1(self.x1, model_set_axis=False)
        pfit = fitting.LinearLSQFitter()
        model = pfit(p1, self.x1, y1)
        assert_allclose(model.param_sets, expected, atol=10 ** (-7))

    @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
    @pytest.mark.parametrize("fitter", fitters)
    def test_nonlinear_lsqt_1set_1d(self, fitter):
        """1 set 1D x, 1 set 1D y, 1 pset, NonLinearFitter"""
        fitter = fitter()

        g1 = models.Gaussian1D(10, mean=3, stddev=0.2)
        y1 = g1(self.x1)
        model = fitter(g1, self.x1, y1)
        assert_allclose(model.parameters, [10, 3, 0.2])

    @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
    @pytest.mark.parametrize("fitter", fitters)
    def test_nonlinear_lsqt_Nset_1d(self, fitter):
        """1 set 1D x, 1 set 1D y, 2 param_sets, NonLinearFitter"""
        fitter = fitter()

        MESSAGE = r"Non-linear fitters can only fit one data set at a time"
        with pytest.raises(ValueError, match=MESSAGE):
            g1 = models.Gaussian1D(
                [10.2, 10], mean=[3, 3.2], stddev=[0.23, 0.2], n_models=2
            )
            y1 = g1(self.x1, model_set_axis=False)
            _ = fitter(g1, self.x1, y1)

    @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
    @pytest.mark.parametrize("fitter", fitters)
    def test_nonlinear_lsqt_1set_2d(self, fitter):
        """1 set 2D x, 1 set 2D y, 1 pset, NonLinearFitter"""
        fitter = fitter()

        g2 = models.Gaussian2D(
            10, x_mean=3, y_mean=4, x_stddev=0.3, y_stddev=0.2, theta=0
        )
        z = g2(self.x, self.y)
        model = fitter(g2, self.x, self.y, z)
        assert_allclose(model.parameters, [10, 3, 4, 0.3, 0.2, 0])

    @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
    @pytest.mark.parametrize("fitter", fitters)
    def test_nonlinear_lsqt_Nset_2d(self, fitter):
        """1 set 2D x, 1 set 2D y, 2 param_sets, NonLinearFitter"""
        fitter = fitter()

        MESSAGE = (
            r"Input argument .* does not have the correct dimensions in .* for a model"
            r" set with .*"
        )
        with pytest.raises(ValueError, match=MESSAGE):
            g2 = models.Gaussian2D(
                [10, 10],
                [3, 3],
                [4, 4],
                x_stddev=[0.3, 0.3],
                y_stddev=[0.2, 0.2],
                theta=[0, 0],
                n_models=2,
            )
            z = g2(self.x.flatten(), self.y.flatten())
            _ = fitter(g2, self.x, self.y, z)


class TestEvaluation:
    """
    Test various input options to model evaluation.

    TestFitting actually covers evaluation of polynomials.
    """

    def setup_class(self):
        self.x1 = np.arange(20)
        self.y, self.x = np.mgrid[:10, :10]

    def test_non_linear_NYset(self):
        """
        This case covers: N param sets, 1 set 1D x --> N sets 1D y data.
        """
        g1 = models.Gaussian1D([10, 10], [3, 3], [0.2, 0.2], n_models=2)
        y1 = g1(self.x1, model_set_axis=False)
        assert np.all((y1[0, :] - y1[1, :]).nonzero() == np.array([]))

    def test_non_linear_NXYset(self):
        """
        This case covers: N param sets, N sets 1D x --> N sets 1D y data.
        """
        g1 = models.Gaussian1D([10, 10], [3, 3], [0.2, 0.2], n_models=2)
        xx = np.array([self.x1, self.x1])
        y1 = g1(xx)
        assert_allclose(y1[:, 0], y1[:, 1], atol=10 ** (-12))
def test_p1_1set_1pset(self): """1 data set, 1 pset, Polynomial1D""" p1 = models.Polynomial1D(4) y1 = p1(self.x1) assert y1.shape == (20,) def test_p1_nset_npset(self): """N data sets, N param_sets, Polynomial1D""" p1 = models.Polynomial1D(4, n_models=2) y1 = p1(np.array([self.x1, self.x1]).T, model_set_axis=-1) assert y1.shape == (20, 2) assert_allclose(y1[0, :], y1[1, :], atol=10 ** (-12)) def test_p2_1set_1pset(self): """1 pset, 1 2D data set, Polynomial2D""" p2 = models.Polynomial2D(5) z = p2(self.x, self.y) assert z.shape == (10, 10) def test_p2_nset_npset(self): """N param_sets, N 2D data sets, Poly2d""" p2 = models.Polynomial2D(5, n_models=2) xx = np.array([self.x, self.x]) yy = np.array([self.y, self.y]) z = p2(xx, yy) assert z.shape == (2, 10, 10) def test_nset_domain(self): """ Test model set with negative model_set_axis. In this case model_set_axis=-1 is identical to model_set_axis=1. """ xx = np.array([self.x1, self.x1]).T xx[0, 0] = 100 xx[1, 0] = 100 xx[2, 0] = 99 p1 = models.Polynomial1D(5, c0=[1, 2], c1=[3, 4], n_models=2) yy = p1(xx, model_set_axis=-1) assert_allclose(xx.shape, yy.shape) yy1 = p1(xx, model_set_axis=1) assert_allclose(yy, yy1) def test_evaluate_gauss2d(self): cov = np.array([[1.0, 0.8], [0.8, 3]]) g = models.Gaussian2D(1.0, 5.0, 4.0, cov_matrix=cov) y, x = np.mgrid[:10, :10] g(x, y) class TModel_1_1(Fittable1DModel): p1 = Parameter() p2 = Parameter() @staticmethod def evaluate(x, p1, p2): return x + p1 + p2 class TestSingleInputSingleOutputSingleModel: """ A suite of tests to check various cases of parameter and input combinations on models with n_input = n_output = 1 on a toy model with n_models=1. Many of these tests mirror test cases in ``astropy.modeling.tests.test_parameters.TestParameterInitialization``, except that this tests how different parameter arrangements interact with different types of model inputs. """ def test_scalar_parameters_scalar_input(self): """ Scalar parameters with a scalar input should return a scalar. """ t = TModel_1_1(1, 10) y = t(100) assert isinstance(y, float) assert np.ndim(y) == 0 assert y == 111 def test_scalar_parameters_1d_array_input(self): """ Scalar parameters should broadcast with an array input to result in an array output of the same shape as the input. """ t = TModel_1_1(1, 10) y = t(np.arange(5) * 100) assert isinstance(y, np.ndarray) assert np.shape(y) == (5,) assert np.all(y == [11, 111, 211, 311, 411]) def test_scalar_parameters_2d_array_input(self): """ Scalar parameters should broadcast with an array input to result in an array output of the same shape as the input. """ t = TModel_1_1(1, 10) y = t(np.arange(6).reshape(2, 3) * 100) assert isinstance(y, np.ndarray) assert np.shape(y) == (2, 3) assert np.all(y == [[11, 111, 211], [311, 411, 511]]) def test_scalar_parameters_3d_array_input(self): """ Scalar parameters should broadcast with an array input to result in an array output of the same shape as the input. """ t = TModel_1_1(1, 10) y = t(np.arange(12).reshape(2, 3, 2) * 100) assert isinstance(y, np.ndarray) assert np.shape(y) == (2, 3, 2) assert np.all( y == [ [[11, 111], [211, 311], [411, 511]], [[611, 711], [811, 911], [1011, 1111]], ] ) def test_1d_array_parameters_scalar_input(self): """ Array parameters should all be broadcastable with each other, and with a scalar input the output should be broadcast to the maximum dimensions of the parameters. 
""" t = TModel_1_1([1, 2], [10, 20]) y = t(100) assert isinstance(y, np.ndarray) assert np.shape(y) == (2,) assert np.all(y == [111, 122]) def test_1d_array_parameters_1d_array_input(self): """ When given an array input it must be broadcastable with all the parameters. """ t = TModel_1_1([1, 2], [10, 20]) y1 = t([100, 200]) assert np.shape(y1) == (2,) assert np.all(y1 == [111, 222]) y2 = t([[100], [200]]) assert np.shape(y2) == (2, 2) assert np.all(y2 == [[111, 122], [211, 222]]) MESSAGE = ( r"self input argument 'x' of shape .* cannot be broadcast with parameter" r" 'p1' of shape .*" ) with pytest.raises(ValueError, match=MESSAGE): # Doesn't broadcast t([100, 200, 300]) def test_2d_array_parameters_2d_array_input(self): """ When given an array input it must be broadcastable with all the parameters. """ t = TModel_1_1([[1, 2], [3, 4]], [[10, 20], [30, 40]]) y1 = t([[100, 200], [300, 400]]) assert np.shape(y1) == (2, 2) assert np.all(y1 == [[111, 222], [333, 444]]) y2 = t([[[[100]], [[200]]], [[[300]], [[400]]]]) assert np.shape(y2) == (2, 2, 2, 2) assert np.all( y2 == [ [[[111, 122], [133, 144]], [[211, 222], [233, 244]]], [[[311, 322], [333, 344]], [[411, 422], [433, 444]]], ] ) MESSAGE = ( r"self input argument .* of shape .* cannot be broadcast with parameter .*" r" of shape .*" ) with pytest.raises(ValueError, match=MESSAGE): # Doesn't broadcast t([[100, 200, 300], [400, 500, 600]]) def test_mixed_array_parameters_1d_array_input(self): """ When given an array input it must be broadcastable with all the parameters. """ t = TModel_1_1( [ [[0.01, 0.02, 0.03], [0.04, 0.05, 0.06]], [[0.07, 0.08, 0.09], [0.10, 0.11, 0.12]], ], [1, 2, 3], ) y1 = t([10, 20, 30]) assert np.shape(y1) == (2, 2, 3) assert_allclose( y1, [ [[11.01, 22.02, 33.03], [11.04, 22.05, 33.06]], [[11.07, 22.08, 33.09], [11.10, 22.11, 33.12]], ], ) y2 = t([[[[10]]], [[[20]]], [[[30]]]]) assert np.shape(y2) == (3, 2, 2, 3) assert_allclose( y2, [ [ [[11.01, 12.02, 13.03], [11.04, 12.05, 13.06]], [[11.07, 12.08, 13.09], [11.10, 12.11, 13.12]], ], [ [[21.01, 22.02, 23.03], [21.04, 22.05, 23.06]], [[21.07, 22.08, 23.09], [21.10, 22.11, 23.12]], ], [ [[31.01, 32.02, 33.03], [31.04, 32.05, 33.06]], [[31.07, 32.08, 33.09], [31.10, 32.11, 33.12]], ], ], ) class TestSingleInputSingleOutputTwoModel: """ A suite of tests to check various cases of parameter and input combinations on models with n_input = n_output = 1 on a toy model with n_models=2. Many of these tests mirror test cases in ``astropy.modeling.tests.test_parameters.TestParameterInitialization``, except that this tests how different parameter arrangements interact with different types of model inputs. With n_models=2 all outputs should have a first dimension of size 2 (unless defined with model_set_axis != 0). """ def test_scalar_parameters_scalar_input(self): """ Scalar parameters with a scalar input should return a 1-D array with size equal to the number of models. """ t = TModel_1_1([1, 2], [10, 20], n_models=2) y = t(100) assert np.shape(y) == (2,) assert np.all(y == [111, 122]) def test_scalar_parameters_1d_array_input(self): """ The dimension of the input should match the number of models unless model_set_axis=False is given, in which case the input is copied across all models. 
""" t = TModel_1_1([1, 2], [10, 20], n_models=2) MESSAGE = ( r"Input argument .* does not have the correct dimensions in .* for a model" r" set with .*" ) with pytest.raises(ValueError, match=MESSAGE): t(np.arange(5) * 100) y1 = t([100, 200]) assert np.shape(y1) == (2,) assert np.all(y1 == [111, 222]) y2 = t([100, 200], model_set_axis=False) # In this case the value [100, 200, 300] should be evaluated on each # model rather than evaluating the first model with 100 and the second # model with 200 assert np.shape(y2) == (2, 2) assert np.all(y2 == [[111, 211], [122, 222]]) y3 = t([100, 200, 300], model_set_axis=False) assert np.shape(y3) == (2, 3) assert np.all(y3 == [[111, 211, 311], [122, 222, 322]]) def test_scalar_parameters_2d_array_input(self): """ The dimension of the input should match the number of models unless model_set_axis=False is given, in which case the input is copied across all models. """ t = TModel_1_1([1, 2], [10, 20], n_models=2) y1 = t(np.arange(6).reshape(2, 3) * 100) assert np.shape(y1) == (2, 3) assert np.all(y1 == [[11, 111, 211], [322, 422, 522]]) y2 = t(np.arange(6).reshape(2, 3) * 100, model_set_axis=False) assert np.shape(y2) == (2, 2, 3) assert np.all( y2 == [[[11, 111, 211], [311, 411, 511]], [[22, 122, 222], [322, 422, 522]]] ) def test_scalar_parameters_3d_array_input(self): """ The dimension of the input should match the number of models unless model_set_axis=False is given, in which case the input is copied across all models. """ t = TModel_1_1([1, 2], [10, 20], n_models=2) data = np.arange(12).reshape(2, 3, 2) * 100 y1 = t(data) assert np.shape(y1) == (2, 3, 2) assert np.all( y1 == [ [[11, 111], [211, 311], [411, 511]], [[622, 722], [822, 922], [1022, 1122]], ] ) y2 = t(data, model_set_axis=False) assert np.shape(y2) == (2, 2, 3, 2) assert np.all(y2 == np.array([data + 11, data + 22])) def test_1d_array_parameters_scalar_input(self): """ Array parameters should all be broadcastable with each other, and with a scalar input the output should be broadcast to the maximum dimensions of the parameters. """ t = TModel_1_1([[1, 2, 3], [4, 5, 6]], [[10, 20, 30], [40, 50, 60]], n_models=2) y = t(100) assert np.shape(y) == (2, 3) assert np.all(y == [[111, 122, 133], [144, 155, 166]]) def test_1d_array_parameters_1d_array_input(self): """ When the input is an array, if model_set_axis=False then it must broadcast with the shapes of the parameters (excluding the model_set_axis). Otherwise all dimensions must be broadcastable. 
""" t = TModel_1_1([[1, 2, 3], [4, 5, 6]], [[10, 20, 30], [40, 50, 60]], n_models=2) MESSAGE = ( r"Input argument .* does not have the correct dimensions in .* for a model" r" set with .*" ) with pytest.raises(ValueError, match=MESSAGE): y1 = t([100, 200, 300]) y1 = t([100, 200]) assert np.shape(y1) == (2, 3) assert np.all(y1 == [[111, 122, 133], [244, 255, 266]]) MESSAGE = ( r"Model input argument .* of shape .* cannot be broadcast with parameter .*" r" of shape .*" ) with pytest.raises(ValueError, match=MESSAGE): # Doesn't broadcast with the shape of the parameters, (3,) y2 = t([100, 200], model_set_axis=False) y2 = t([100, 200, 300], model_set_axis=False) assert np.shape(y2) == (2, 3) assert np.all(y2 == [[111, 222, 333], [144, 255, 366]]) def test_2d_array_parameters_2d_array_input(self): t = TModel_1_1( [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[10, 20], [30, 40]], [[50, 60], [70, 80]]], n_models=2, ) y1 = t([[100, 200], [300, 400]]) assert np.shape(y1) == (2, 2, 2) assert np.all( y1 == [ [[111, 222], [133, 244]], [[355, 466], [377, 488]], ] ) MESSAGE = ( r"Model input argument .* of shape .* cannot be broadcast with parameter .*" r" of shape .*" ) with pytest.raises(ValueError, match=MESSAGE): y2 = t([[100, 200, 300], [400, 500, 600]]) y2 = t([[[100, 200], [300, 400]], [[500, 600], [700, 800]]]) assert np.shape(y2) == (2, 2, 2) assert np.all( y2 == [ [[111, 222], [333, 444]], [[555, 666], [777, 888]], ] ) def test_mixed_array_parameters_1d_array_input(self): t = TModel_1_1( [ [[0.01, 0.02, 0.03], [0.04, 0.05, 0.06]], [[0.07, 0.08, 0.09], [0.10, 0.11, 0.12]], ], [[1, 2, 3], [4, 5, 6]], n_models=2, ) MESSAGE = ( r"Input argument .* does not have the correct dimensions in .* for a model" r" set with .*" ) with pytest.raises(ValueError, match=MESSAGE): y = t([10, 20, 30]) y = t([10, 20, 30], model_set_axis=False) assert np.shape(y) == (2, 2, 3) assert_allclose( y, [ [[11.01, 22.02, 33.03], [11.04, 22.05, 33.06]], [[14.07, 25.08, 36.09], [14.10, 25.11, 36.12]], ], ) class TModel_1_2(FittableModel): n_inputs = 1 n_outputs = 2 p1 = Parameter() p2 = Parameter() p3 = Parameter() @staticmethod def evaluate(x, p1, p2, p3): return (x + p1 + p2, x + p1 + p2 + p3) class TestSingleInputDoubleOutputSingleModel: """ A suite of tests to check various cases of parameter and input combinations on models with n_input = 1 but n_output = 2 on a toy model with n_models=1. As of writing there are not enough controls to adjust how outputs from such a model should be formatted (currently the shapes of outputs are assumed to be directly associated with the shapes of corresponding inputs when n_inputs == n_outputs). For now, the approach taken for cases like this is to assume all outputs should have the same format. """ def test_scalar_parameters_scalar_input(self): """ Scalar parameters with a scalar input should return a scalar. """ t = TModel_1_2(1, 10, 1000) y, z = t(100) assert isinstance(y, float) assert isinstance(z, float) assert np.ndim(y) == np.ndim(z) == 0 assert y == 111 assert z == 1111 def test_scalar_parameters_1d_array_input(self): """ Scalar parameters should broadcast with an array input to result in an array output of the same shape as the input. 
""" t = TModel_1_2(1, 10, 1000) y, z = t(np.arange(5) * 100) assert isinstance(y, np.ndarray) assert isinstance(z, np.ndarray) assert np.shape(y) == np.shape(z) == (5,) assert np.all(y == [11, 111, 211, 311, 411]) assert np.all(z == (y + 1000)) def test_scalar_parameters_2d_array_input(self): """ Scalar parameters should broadcast with an array input to result in an array output of the same shape as the input. """ t = TModel_1_2(1, 10, 1000) y, z = t(np.arange(6).reshape(2, 3) * 100) assert isinstance(y, np.ndarray) assert isinstance(z, np.ndarray) assert np.shape(y) == np.shape(z) == (2, 3) assert np.all(y == [[11, 111, 211], [311, 411, 511]]) assert np.all(z == (y + 1000)) def test_scalar_parameters_3d_array_input(self): """ Scalar parameters should broadcast with an array input to result in an array output of the same shape as the input. """ t = TModel_1_2(1, 10, 1000) y, z = t(np.arange(12).reshape(2, 3, 2) * 100) assert isinstance(y, np.ndarray) assert isinstance(z, np.ndarray) assert np.shape(y) == np.shape(z) == (2, 3, 2) assert np.all( y == [ [[11, 111], [211, 311], [411, 511]], [[611, 711], [811, 911], [1011, 1111]], ] ) assert np.all(z == (y + 1000)) def test_1d_array_parameters_scalar_input(self): """ Array parameters should all be broadcastable with each other, and with a scalar input the output should be broadcast to the maximum dimensions of the parameters. """ t = TModel_1_2([1, 2], [10, 20], [1000, 2000]) y, z = t(100) assert isinstance(y, np.ndarray) assert isinstance(z, np.ndarray) assert np.shape(y) == np.shape(z) == (2,) assert np.all(y == [111, 122]) assert np.all(z == [1111, 2122]) def test_1d_array_parameters_1d_array_input(self): """ When given an array input it must be broadcastable with all the parameters. """ t = TModel_1_2([1, 2], [10, 20], [1000, 2000]) y1, z1 = t([100, 200]) assert np.shape(y1) == np.shape(z1) == (2,) assert np.all(y1 == [111, 222]) assert np.all(z1 == [1111, 2222]) y2, z2 = t([[100], [200]]) assert np.shape(y2) == np.shape(z2) == (2, 2) assert np.all(y2 == [[111, 122], [211, 222]]) assert np.all(z2 == [[1111, 2122], [1211, 2222]]) MESSAGE = ( r"self input argument .* of shape .* cannot be broadcast with parameter .*" r" of shape .*" ) with pytest.raises(ValueError, match=MESSAGE): # Doesn't broadcast y3, z3 = t([100, 200, 300]) def test_2d_array_parameters_2d_array_input(self): """ When given an array input it must be broadcastable with all the parameters. """ t = TModel_1_2( [[1, 2], [3, 4]], [[10, 20], [30, 40]], [[1000, 2000], [3000, 4000]] ) y1, z1 = t([[100, 200], [300, 400]]) assert np.shape(y1) == np.shape(z1) == (2, 2) assert np.all(y1 == [[111, 222], [333, 444]]) assert np.all(z1 == [[1111, 2222], [3333, 4444]]) y2, z2 = t([[[[100]], [[200]]], [[[300]], [[400]]]]) assert np.shape(y2) == np.shape(z2) == (2, 2, 2, 2) assert np.all( y2 == [ [[[111, 122], [133, 144]], [[211, 222], [233, 244]]], [[[311, 322], [333, 344]], [[411, 422], [433, 444]]], ] ) assert np.all( z2 == [ [[[1111, 2122], [3133, 4144]], [[1211, 2222], [3233, 4244]]], [[[1311, 2322], [3333, 4344]], [[1411, 2422], [3433, 4444]]], ] ) MESSAGE = ( r"self input argument .* of shape .* cannot be broadcast with parameter .*" r" of shape .*" ) with pytest.raises(ValueError, match=MESSAGE): # Doesn't broadcast y3, z3 = t([[100, 200, 300], [400, 500, 600]]) def test_mixed_array_parameters_1d_array_input(self): """ When given an array input it must be broadcastable with all the parameters. 
""" t = TModel_1_2( [ [[0.01, 0.02, 0.03], [0.04, 0.05, 0.06]], [[0.07, 0.08, 0.09], [0.10, 0.11, 0.12]], ], [1, 2, 3], [100, 200, 300], ) y1, z1 = t([10, 20, 30]) assert np.shape(y1) == np.shape(z1) == (2, 2, 3) assert_allclose( y1, [ [[11.01, 22.02, 33.03], [11.04, 22.05, 33.06]], [[11.07, 22.08, 33.09], [11.10, 22.11, 33.12]], ], ) assert_allclose( z1, [ [[111.01, 222.02, 333.03], [111.04, 222.05, 333.06]], [[111.07, 222.08, 333.09], [111.10, 222.11, 333.12]], ], ) y2, z2 = t([[[[10]]], [[[20]]], [[[30]]]]) assert np.shape(y2) == np.shape(z2) == (3, 2, 2, 3) assert_allclose( y2, [ [ [[11.01, 12.02, 13.03], [11.04, 12.05, 13.06]], [[11.07, 12.08, 13.09], [11.10, 12.11, 13.12]], ], [ [[21.01, 22.02, 23.03], [21.04, 22.05, 23.06]], [[21.07, 22.08, 23.09], [21.10, 22.11, 23.12]], ], [ [[31.01, 32.02, 33.03], [31.04, 32.05, 33.06]], [[31.07, 32.08, 33.09], [31.10, 32.11, 33.12]], ], ], ) assert_allclose( z2, [ [ [[111.01, 212.02, 313.03], [111.04, 212.05, 313.06]], [[111.07, 212.08, 313.09], [111.10, 212.11, 313.12]], ], [ [[121.01, 222.02, 323.03], [121.04, 222.05, 323.06]], [[121.07, 222.08, 323.09], [121.10, 222.11, 323.12]], ], [ [[131.01, 232.02, 333.03], [131.04, 232.05, 333.06]], [[131.07, 232.08, 333.09], [131.10, 232.11, 333.12]], ], ], ) # test broadcasting rules broadcast_models = [ {"model": models.Identity(2), "inputs": [0, [1, 1]], "outputs": [0, [1, 1]]}, {"model": models.Identity(2), "inputs": [[1, 1], 0], "outputs": [[1, 1], 0]}, {"model": models.Mapping((0, 1)), "inputs": [0, [1, 1]], "outputs": [0, [1, 1]]}, {"model": models.Mapping((1, 0)), "inputs": [0, [1, 1]], "outputs": [[1, 1], 0]}, { "model": models.Mapping((1, 0), n_inputs=3), "inputs": [0, [1, 1], 2], "outputs": [[1, 1], 0], }, { "model": models.Mapping((0, 1, 0)), "inputs": [0, [1, 1]], "outputs": [0, [1, 1], 0], }, { "model": models.Mapping((0, 1, 1)), "inputs": [0, [1, 1]], "outputs": [0, [1, 1], [1, 1]], }, {"model": models.Polynomial2D(1, c0_0=1), "inputs": [0, [1, 1]], "outputs": [1, 1]}, {"model": models.Polynomial2D(1, c0_0=1), "inputs": [0, 1], "outputs": 1}, { "model": models.Gaussian2D(1, 1, 2, 1, 1.2), "inputs": [0, [1, 1]], "outputs": [0.42860385, 0.42860385], }, { "model": models.Gaussian2D(1, 1, 2, 1, 1.2), "inputs": [0, 1], "outputs": 0.428603846153, }, { "model": models.Polynomial2D(1, c0_0=1) & models.Polynomial2D(1, c0_0=2), "inputs": [1, 1, 1, 1], "outputs": (1, 2), }, { "model": models.Polynomial2D(1, c0_0=1) & models.Polynomial2D(1, c0_0=2), "inputs": [1, 1, [1, 1], [1, 1]], "outputs": (1, [2, 2]), }, { "model": models.math.MultiplyUfunc(), "inputs": [np.array([np.linspace(0, 1, 5)]).T, np.arange(2)], "outputs": np.array( [[0.0, 0.0], [0.0, 0.25], [0.0, 0.5], [0.0, 0.75], [0.0, 1.0]] ), }, ] @pytest.mark.parametrize("model", broadcast_models) def test_mixed_input(model): result = model["model"](*model["inputs"]) if np.isscalar(result): assert_allclose(result, model["outputs"]) else: for i in range(len(result)): assert_allclose(result[i], model["outputs"][i]) def test_more_outputs(): class M(FittableModel): standard_broadcasting = False n_inputs = 2 n_outputs = 3 a = Parameter() def evaluate(self, x, y, a): return a * x, a - x, a + y def __call__(self, *args, **kwargs): inputs, _ = super().prepare_inputs(*args, **kwargs) outputs = self.evaluate(*inputs, *self.parameters) output_shapes = [out.shape for out in outputs] output_shapes = [() if shape == (1,) else shape for shape in output_shapes] return self.prepare_outputs((tuple(output_shapes),), *outputs, **kwargs) c = M(1) result = c([1, 1], 1) 
    expected = [[1.0, 1.0], [0.0, 0.0], 2.0]
    for r, e in zip(result, expected):
        assert_allclose(r, e)

    c = M(1)
    result = c(1, [1, 1])
    expected = [1.0, 0.0, [2.0, 2.0]]
    for r, e in zip(result, expected):
        assert_allclose(r, e)


class TInputFormatter(Model):
    """
    A toy model to test input/output formatting.
    """

    n_inputs = 2
    n_outputs = 2
    outputs = ("x", "y")

    @staticmethod
    def evaluate(x, y):
        return x, y


def test_format_input_scalars():
    model = TInputFormatter()
    result = model(1, 2)
    assert result == (1, 2)


def test_format_input_arrays():
    model = TInputFormatter()
    result = model([1, 1], [2, 2])
    assert_allclose(result, (np.array([1, 1]), np.array([2, 2])))


def test_format_input_arrays_transposed():
    model = TInputFormatter()
    input = np.array([[1, 1]]).T, np.array([[2, 2]]).T
    result = model(*input)
    assert_allclose(result, input)


@pytest.mark.parametrize(
    "model",
    [
        models.Gaussian2D(),
        models.Polynomial2D(1),
        models.Rotation2D(),
        models.Pix2Sky_TAN(),
        models.Tabular2D(lookup_table=np.ones((4, 5))),
    ],
)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_call_keyword_args_1(model):
    """
    Test calling a model with positional, keyword and a mixture of both
    arguments.
    """
    positional = model(1, 2)
    assert_allclose(positional, model(x=1, y=2))
    assert_allclose(positional, model(1, y=2))

    model.inputs = ("r", "t")
    assert_allclose(positional, model(r=1, t=2))
    assert_allclose(positional, model(1, t=2))
    assert_allclose(positional, model(1, 2))

    MESSAGE = r"Too many input arguments - expected 2, got .*"
    with pytest.raises(ValueError, match=MESSAGE):
        model(1, 2, 3)

    with pytest.raises(ValueError, match=MESSAGE):
        model(1, 2, t=12, r=3)

    MESSAGE = r"Missing input arguments - expected 2, got 1"
    with pytest.raises(ValueError, match=MESSAGE):
        model(1)


@pytest.mark.parametrize(
    "model",
    [
        models.Gaussian1D(),
        models.Polynomial1D(1),
        models.Tabular1D(lookup_table=np.ones((5,))),
    ],
)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_call_keyword_args_2(model):
    """
    Test calling a model with positional, keyword and a mixture of both
    arguments.
    """
    positional = model(1)
    assert_allclose(positional, model(x=1))

    model.inputs = ("r",)
    assert_allclose(positional, model(r=1))

    MESSAGE = r"Too many input arguments - expected .*, got .*"
    with pytest.raises(ValueError, match=MESSAGE):
        model(1, 2, 3)

    with pytest.raises(ValueError, match=MESSAGE):
        model(1, 2, t=12, r=3)

    MESSAGE = r"Missing input arguments - expected 1, got 0"
    with pytest.raises(ValueError, match=MESSAGE):
        model()


@pytest.mark.parametrize(
    "model",
    [
        models.Gaussian2D() | models.Polynomial1D(1),
        models.Gaussian1D() & models.Polynomial1D(1),
        models.Gaussian2D() + models.Polynomial2D(1),
        models.Gaussian2D() - models.Polynomial2D(1),
        models.Gaussian2D() * models.Polynomial2D(1),
        models.Identity(2) | models.Polynomial2D(1),
        models.Mapping((1,)) | models.Polynomial1D(1),
    ],
)
def test_call_keyword_args_3(model):
    """
    Test calling a model with positional, keyword and a mixture of both
    arguments.
""" positional = model(1, 2) model.inputs = ("r", "t") assert_allclose(positional, model(r=1, t=2)) assert_allclose(positional, model(1, t=2)) MESSAGE = r"Too many input arguments - expected .*, got .*" with pytest.raises(ValueError, match=MESSAGE): model(1, 2, 3) with pytest.raises(ValueError, match=MESSAGE): model(1, 2, t=12, r=3) MESSAGE = r"Missing input arguments - expected 2, got 0" with pytest.raises(ValueError, match=MESSAGE): model() @pytest.mark.parametrize( "model", [ models.Identity(2), models.Mapping((0, 1)), models.Mapping((1,)), ], ) def test_call_keyword_mappings(model): """ Test calling a model with positional, keywrd and a mixture of both arguments. """ positional = model(1, 2) assert_allclose(positional, model(x0=1, x1=2)) assert_allclose(positional, model(1, x1=2)) # We take a copy before modifying the model since otherwise this changes # the instance used in the parametrize call and affects future test runs. model = model.copy() model.inputs = ("r", "t") assert_allclose(positional, model(r=1, t=2)) assert_allclose(positional, model(1, t=2)) assert_allclose(positional, model(1, 2)) MESSAGE = r"Too many input arguments - expected .*, got .*" with pytest.raises(ValueError, match=MESSAGE): model(1, 2, 3) with pytest.raises(ValueError, match=MESSAGE): model(1, 2, t=12, r=3) MESSAGE = r"Missing input arguments - expected 2, got 1" with pytest.raises(ValueError, match=MESSAGE): model(1)
9dca12bf8c6179864e43d47eec83ea351ef95e51ff308e0113a5fd43956cb57b
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests that relate to using quantities/units on parameters of models.
"""
import numpy as np
import pytest

from astropy import coordinates as coord
from astropy import units as u
from astropy.modeling.core import Fittable1DModel, InputParameterError
from astropy.modeling.models import (
    Const1D,
    Gaussian1D,
    Pix2Sky_TAN,
    RotateNative2Celestial,
    Rotation2D,
)
from astropy.modeling.parameters import Parameter, ParameterDefinitionError
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import UnitsError


class BaseTestModel(Fittable1DModel):
    @staticmethod
    def evaluate(x, a):
        return x


def test_parameter_quantity():
    """
    Basic tests for initializing general models (that do not require units)
    with parameters that have units attached.
    """
    g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
    assert g.amplitude.value == 1.0
    assert g.amplitude.unit is u.J
    assert g.mean.value == 1.0
    assert g.mean.unit is u.m
    assert g.stddev.value == 0.1
    assert g.stddev.unit is u.m


def test_parameter_set_quantity():
    """
    Make sure that parameters that start off as quantities can be set to any
    other quantity, regardless of whether the units of the new quantity are
    compatible with the original ones.

    We basically leave it up to the evaluate method to raise errors if there
    are issues with incompatible units, and we don't check for consistency at
    the parameter level.
    """
    g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)

    # Try equivalent units
    g.amplitude = 4 * u.kJ
    assert_quantity_allclose(g.amplitude, 4 * u.kJ)

    g.mean = 3 * u.km
    assert_quantity_allclose(g.mean, 3 * u.km)

    g.stddev = 2 * u.mm
    assert_quantity_allclose(g.stddev, 2 * u.mm)

    # Try different units
    g.amplitude = 2 * u.s
    assert_quantity_allclose(g.amplitude, 2 * u.s)

    g.mean = 2 * u.Jy
    assert_quantity_allclose(g.mean, 2 * u.Jy)


def test_parameter_lose_units():
    """
    Check that parameters that have been set to a quantity raise an exception
    when they are then set to a value with no units.

    We do this because setting a parameter to a value with no units is
    ambiguous if units were set before: if a parameter is 1 * u.Jy and the
    parameter is then set to 4, does this mean 4 without units, or 4 * u.Jy?
    """
    g = Gaussian1D(1 * u.Jy, 3, 0.1)

    MESSAGE = (
        r"The .* parameter should be given as a .* because it was originally"
        r" initialized as a .*"
    )
    with pytest.raises(UnitsError, match=MESSAGE):
        g.amplitude = 2


def test_parameter_add_units():
    """
    On the other hand, if starting from a parameter with no units, we should
    be able to add units since this is unambiguous.
    """
    g = Gaussian1D(1, 3, 0.1)

    g.amplitude = 2 * u.Jy
    assert_quantity_allclose(g.amplitude, 2 * u.Jy)


def test_parameter_change_unit():
    """
    Test that changing the unit on a parameter does not work.

    This is an ambiguous operation because it's not clear if it means that the
    value should be converted or if the unit should be changed without
    conversion.
""" g = Gaussian1D(1, 1 * u.m, 0.1 * u.m) # Setting a unit on a unitless parameter should not work MESSAGE = ( r"Cannot attach units to parameters that were not initially specified with" r" units" ) with pytest.raises(ValueError, match=MESSAGE): g.amplitude.unit = u.Jy # But changing to another unit should not, even if it is an equivalent unit MESSAGE = ( r"Cannot change the unit attribute directly, instead change the parameter to a" r" new quantity" ) with pytest.raises(ValueError, match=MESSAGE): g.mean.unit = u.cm def test_parameter_set_value(): """ Test that changing the value on a parameter works as expected. """ g = Gaussian1D(1 * u.Jy, 1 * u.m, 0.1 * u.m) # To set a parameter to a quantity, we simply do g.amplitude = 2 * u.Jy # If we try setting the value, we need to pass a non-quantity value # TODO: determine whether this is the desired behavior? g.amplitude.value = 4 assert_quantity_allclose(g.amplitude, 4 * u.Jy) assert g.amplitude.value == 4 assert g.amplitude.unit is u.Jy # If we try setting it to a Quantity, we raise an error MESSAGE = ( r"The .value property on parameters should be set to unitless values, not" r" Quantity objects.*" ) with pytest.raises(TypeError, match=MESSAGE): g.amplitude.value = 3 * u.Jy def test_parameter_quantity_property(): """ Test that the quantity property of Parameters behaves as expected """ # Since parameters have a .value and .unit parameter that return just the # value and unit respectively, we also have a .quantity parameter that # returns a Quantity instance. g = Gaussian1D(1 * u.Jy, 1 * u.m, 0.1 * u.m) assert_quantity_allclose(g.amplitude.quantity, 1 * u.Jy) # Setting a parameter to a quantity changes the value and the default unit g.amplitude.quantity = 5 * u.mJy assert g.amplitude.value == 5 assert g.amplitude.unit is u.mJy # And we can also set the parameter to a value with different units g.amplitude.quantity = 4 * u.s assert g.amplitude.value == 4 assert g.amplitude.unit is u.s # But not to a value without units MESSAGE = r"The .quantity attribute should be set to a Quantity object" with pytest.raises(TypeError, match=MESSAGE): g.amplitude.quantity = 3 def test_parameter_default_units_match(): # If the unit and default quantity units are different, raise an error MESSAGE = ( r"parameter default 1.0 m does not have units equivalent to the required" r" unit Jy" ) with pytest.raises(ParameterDefinitionError, match=MESSAGE): class TestC(Fittable1DModel): a = Parameter(default=1.0 * u.m, unit=u.Jy) @pytest.mark.parametrize(("unit", "default"), ((u.m, 1.0), (None, 1 * u.m))) def test_parameter_defaults(unit, default): """ Test that default quantities are correctly taken into account """ class TestModel(BaseTestModel): a = Parameter(default=default, unit=unit) # TODO: decide whether the default property should return a value or # a quantity? # The default unit and value should be set on the class assert TestModel.a.unit == u.m assert TestModel.a.default == 1.0 # Check that the default unit and value are also set on a class instance m = TestModel() assert m.a.unit == u.m assert m.a.default == m.a.value == 1.0 # If the parameter is set to a different value, the default is still the # internal default m = TestModel(2.0 * u.m) assert m.a.unit == u.m assert m.a.value == 2.0 assert m.a.default == 1.0 # Instantiate with a different, but compatible unit m = TestModel(2.0 * u.pc) assert m.a.unit == u.pc assert m.a.value == 2.0 # The default is still in the original units # TODO: but how do we know what those units are if we don't return a # quantity? 
    assert m.a.default == 1.0

    # Initialize with a completely different unit
    m = TestModel(2.0 * u.Jy)
    assert m.a.unit == u.Jy
    assert m.a.value == 2.0
    # TODO: this illustrates why the default doesn't make sense anymore
    assert m.a.default == 1.0

    # Instantiating with a plain number is not allowed once the parameter
    # has units: a Quantity is required
    MESSAGE = r".* requires a Quantity for parameter .*"
    with pytest.raises(InputParameterError, match=MESSAGE):
        TestModel(1.0)


def test_parameter_quantity_arithmetic():
    """
    Test that arithmetic operations with properties that have units return the
    appropriate Quantities.
    """
    g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)

    # Addition should work if units are compatible
    assert g.mean + (1 * u.m) == 2 * u.m
    assert (1 * u.m) + g.mean == 2 * u.m

    # Multiplication by a scalar should also preserve the quantity-ness
    assert g.mean * 2 == (2 * u.m)
    assert 2 * g.mean == (2 * u.m)

    # Multiplication by a quantity should result in units being multiplied
    assert g.mean * (2 * u.m) == (2 * (u.m**2))
    assert (2 * u.m) * g.mean == (2 * (u.m**2))

    # Negation should work properly too
    assert -g.mean == (-1 * u.m)
    assert abs(-g.mean) == g.mean

    # However, addition of a quantity + scalar should not work
    MESSAGE = (
        r"Can only apply 'add' function to dimensionless quantities when other"
        r" argument .*"
    )
    with pytest.raises(UnitsError, match=MESSAGE):
        g.mean + 1
    with pytest.raises(UnitsError, match=MESSAGE):
        1 + g.mean


def test_parameter_quantity_comparison():
    """
    Basic test of comparison operations on properties with units.
    """
    g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)

    # Essentially here we are checking that parameters behave like Quantity
    assert g.mean == 1 * u.m
    assert 1 * u.m == g.mean
    assert g.mean != 1
    assert 1 != g.mean
    assert g.mean < 2 * u.m
    assert 2 * u.m > g.mean

    MESSAGE = (
        r"Can only apply 'less' function to dimensionless quantities when other"
        r" argument .*"
    )
    with pytest.raises(UnitsError, match=MESSAGE):
        g.mean < 2

    with pytest.raises(UnitsError, match=MESSAGE):
        2 > g.mean

    g = Gaussian1D([1, 2] * u.J, [1, 2] * u.m, [0.1, 0.2] * u.m)

    assert np.all(g.mean == [1, 2] * u.m)
    assert np.all([1, 2] * u.m == g.mean)
    assert np.all(g.mean != [1, 2])
    assert np.all([1, 2] != g.mean)

    with pytest.raises(UnitsError, match=MESSAGE):
        g.mean < [3, 4]

    with pytest.raises(UnitsError, match=MESSAGE):
        [3, 4] > g.mean


def test_parameters_compound_models():
    # Smoke test: composing models with quantity parameters should not raise
    Pix2Sky_TAN()
    sky_coords = coord.SkyCoord(ra=5.6, dec=-72, unit=u.deg)
    lon_pole = 180 * u.deg
    n2c = RotateNative2Celestial(sky_coords.ra, sky_coords.dec, lon_pole)
    rot = Rotation2D(23)
    rot | n2c


def test_magunit_parameter():
    """Regression test for bug reproducer in issue #13133"""

    unit = u.ABmag
    c = -20.0 * unit
    model = Const1D(c)

    assert model(-23.0 * unit) == c
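# Summary sketch (added for illustration, not original astropy code) of the
# unit rules checked above: units may be attached to a unitless parameter,
# a unitful parameter's value may be changed through ``.value`` with a plain
# number, but assigning a bare number directly to a unitful parameter is
# ambiguous and raises.
def test_parameter_unit_rules_sketch():
    g = Gaussian1D(1, 3, 0.1)
    g.amplitude = 2 * u.Jy  # unitless -> units: unambiguous, allowed
    g.amplitude.value = 5  # change the value, keeping u.Jy
    assert_quantity_allclose(g.amplitude, 5 * u.Jy)
    with pytest.raises(UnitsError):
        g.amplitude = 7  # bare number on a unitful parameter: ambiguous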
ffc2693f99e6237ae4b4569d03cbd33865434b808dbab8a2b374746a6608cb69
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Tests for polynomial models.""" # pylint: disable=invalid-name import os import unittest.mock as mk import warnings from itertools import product import numpy as np import pytest from numpy.testing import assert_allclose from astropy import conf, wcs from astropy.io import fits from astropy.modeling import fitting from astropy.modeling.functional_models import Linear1D from astropy.modeling.mappings import Identity from astropy.modeling.polynomial import ( SIP, Chebyshev1D, Chebyshev2D, Hermite1D, Hermite2D, Legendre1D, Legendre2D, OrthoPolynomialBase, Polynomial1D, Polynomial2D, PolynomialBase, ) from astropy.utils.compat.optional_deps import HAS_SCIPY from astropy.utils.data import get_pkg_data_filename from astropy.utils.exceptions import AstropyUserWarning linear1d = { Chebyshev1D: { "args": (3,), "kwargs": {"domain": [1, 10]}, "parameters": {"c0": 1.2, "c1": 2, "c2": 2.3, "c3": 0.2}, "constraints": {"fixed": {"c0": True}}, }, Hermite1D: { "args": (3,), "kwargs": {"domain": [1, 10]}, "parameters": {"c0": 1.2, "c1": 2, "c2": 2.3, "c3": 0.2}, "constraints": {"fixed": {"c0": True}}, }, Legendre1D: { "args": (3,), "kwargs": {"domain": [1, 10]}, "parameters": {"c0": 1.2, "c1": 2, "c2": 2.3, "c3": 0.2}, "constraints": {"fixed": {"c0": True}}, }, Polynomial1D: { "args": (3,), "kwargs": {"domain": [1, 10]}, "parameters": {"c0": 1.2, "c1": 2, "c2": 2.3, "c3": 0.2}, "constraints": {"fixed": {"c0": True}}, }, Linear1D: { "args": (), "kwargs": {}, "parameters": {"intercept": 1.2, "slope": 23.1}, "constraints": {"fixed": {"intercept": True}}, }, } linear2d = { Chebyshev2D: { "args": (1, 1), "kwargs": {"x_domain": [0, 99], "y_domain": [0, 82]}, "parameters": {"c0_0": 1.2, "c1_0": 2, "c0_1": 2.3, "c1_1": 0.2}, "constraints": {"fixed": {"c0_0": True}}, }, Hermite2D: { "args": (1, 1), "kwargs": {"x_domain": [0, 99], "y_domain": [0, 82]}, "parameters": {"c0_0": 1.2, "c1_0": 2, "c0_1": 2.3, "c1_1": 0.2}, "constraints": {"fixed": {"c0_0": True}}, }, Legendre2D: { "args": (1, 1), "kwargs": {"x_domain": [0, 99], "y_domain": [0, 82]}, "parameters": {"c0_0": 1.2, "c1_0": 2, "c0_1": 2.3, "c1_1": 0.2}, "constraints": {"fixed": {"c0_0": True}}, }, Polynomial2D: { "args": (1,), "kwargs": {}, "parameters": {"c0_0": 1.2, "c1_0": 2, "c0_1": 2.3}, "constraints": {"fixed": {"c0_0": True}}, }, } fitters = [ fitting.LevMarLSQFitter, fitting.TRFLSQFitter, fitting.LMLSQFitter, fitting.DogBoxLSQFitter, ] @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") class TestFitting: """Test linear fitter with polynomial models.""" def setup_class(self): self.N = 100 self.M = 100 self.x1 = np.linspace(1, 10, 100) self.y2, self.x2 = np.mgrid[:100, :83] rsn = np.random.default_rng(0) self.n1 = rsn.standard_normal(self.x1.size) * 0.1 self.n2 = rsn.standard_normal(self.x2.size) self.n2.shape = self.x2.shape self.linear_fitter = fitting.LinearLSQFitter() # TODO: Most of these test cases have some pretty repetitive setup that we # could probably factor out @pytest.mark.parametrize( ("model_class", "constraints"), list(product(sorted(linear1d, key=str), (False, True))), ) def test_linear_fitter_1D(self, model_class, constraints): """Test fitting with LinearLSQFitter""" model_args = linear1d[model_class] kwargs = {} kwargs.update(model_args["kwargs"]) kwargs.update(model_args["parameters"]) if constraints: kwargs.update(model_args["constraints"]) model = model_class(*model_args["args"], **kwargs) y1 = model(self.x1) with warnings.catch_warnings(): 
warnings.filterwarnings( "ignore", message=r"The fit may be poorly conditioned", category=AstropyUserWarning, ) model_lin = self.linear_fitter(model, self.x1, y1 + self.n1) if constraints: # For the constraints tests we're not checking the overall fit, # just that the constraint was maintained fixed = model_args["constraints"].get("fixed", None) if fixed: for param, value in fixed.items(): expected = model_args["parameters"][param] assert getattr(model_lin, param).value == expected else: assert_allclose(model_lin.parameters, model.parameters, atol=0.2) @pytest.mark.parametrize( ("model_class", "constraints"), list(product(sorted(linear1d, key=str), (False, True))), ) @pytest.mark.parametrize("fitter", fitters) def test_non_linear_fitter_1D(self, model_class, constraints, fitter): """Test fitting with non-linear LevMarLSQFitter""" fitter = fitter() model_args = linear1d[model_class] kwargs = {} kwargs.update(model_args["kwargs"]) kwargs.update(model_args["parameters"]) if constraints: kwargs.update(model_args["constraints"]) model = model_class(*model_args["args"], **kwargs) y1 = model(self.x1) with pytest.warns(AstropyUserWarning, match="Model is linear in parameters"): model_nlin = fitter(model, self.x1, y1 + self.n1) if constraints: fixed = model_args["constraints"].get("fixed", None) if fixed: for param, value in fixed.items(): expected = model_args["parameters"][param] assert getattr(model_nlin, param).value == expected else: assert_allclose(model_nlin.parameters, model.parameters, atol=0.2) @pytest.mark.parametrize( ("model_class", "constraints"), list(product(sorted(linear2d, key=str), (False, True))), ) def test_linear_fitter_2D(self, model_class, constraints): """Test fitting with LinearLSQFitter""" model_args = linear2d[model_class] kwargs = {} kwargs.update(model_args["kwargs"]) kwargs.update(model_args["parameters"]) if constraints: kwargs.update(model_args["constraints"]) model = model_class(*model_args["args"], **kwargs) z = model(self.x2, self.y2) with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message=r"The fit may be poorly conditioned", category=AstropyUserWarning, ) model_lin = self.linear_fitter(model, self.x2, self.y2, z + self.n2) if constraints: fixed = model_args["constraints"].get("fixed", None) if fixed: for param, value in fixed.items(): expected = model_args["parameters"][param] assert getattr(model_lin, param).value == expected else: assert_allclose(model_lin.parameters, model.parameters, atol=0.2) @pytest.mark.parametrize( ("model_class", "constraints"), list(product(sorted(linear2d, key=str), (False, True))), ) @pytest.mark.parametrize("fitter", fitters) def test_non_linear_fitter_2D(self, model_class, constraints, fitter): """Test fitting with non-linear LevMarLSQFitter""" fitter = fitter() model_args = linear2d[model_class] kwargs = {} kwargs.update(model_args["kwargs"]) kwargs.update(model_args["parameters"]) if constraints: kwargs.update(model_args["constraints"]) model = model_class(*model_args["args"], **kwargs) z = model(self.x2, self.y2) with pytest.warns(AstropyUserWarning, match="Model is linear in parameters"): model_nlin = fitter(model, self.x2, self.y2, z + self.n2) if constraints: fixed = model_args["constraints"].get("fixed", None) if fixed: for param, value in fixed.items(): expected = model_args["parameters"][param] assert getattr(model_nlin, param).value == expected else: assert_allclose(model_nlin.parameters, model.parameters, atol=0.2) @pytest.mark.parametrize( "model_class", [cls for cls in list(linear1d) + 
list(linear2d)] ) def test_polynomial_init_with_constraints(model_class): """ Test that polynomial models can be instantiated with constraints, but no parameters specified. Regression test for https://github.com/astropy/astropy/issues/3606 """ # Just determine which parameter to place a constraint on; it doesn't # matter which parameter it is to exhibit the problem so long as it's a # valid parameter for the model if "1D" in model_class.__name__: param = "c0" else: param = "c0_0" if issubclass(model_class, Linear1D): param = "intercept" if issubclass(model_class, OrthoPolynomialBase): degree = (2, 2) else: degree = (2,) m = model_class(*degree, fixed={param: True}) assert m.fixed[param] is True assert getattr(m, param).fixed is True if issubclass(model_class, OrthoPolynomialBase): assert ( repr(m) == f"<{model_class.__name__}(2, 2, c0_0=0., c1_0=0., c2_0=0., c0_1=0., " "c1_1=0., c2_1=0., c0_2=0., c1_2=0., c2_2=0.)>" ) assert ( str(m) == f"Model: {model_class.__name__}\n" "Inputs: ('x', 'y')\n" "Outputs: ('z',)\n" "Model set size: 1\n" "X_Degree: 2\n" "Y_Degree: 2\n" "Parameters:\n" " c0_0 c1_0 c2_0 c0_1 c1_1 c2_1 c0_2 c1_2 c2_2\n" " ---- ---- ---- ---- ---- ---- ---- ---- ----\n" " 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0" ) else: if model_class.__name__ == "Polynomial2D": assert ( repr(m) == "<Polynomial2D(2, c0_0=0., c1_0=0., c2_0=0., " "c0_1=0., c0_2=0., c1_1=0.)>" ) assert ( str(m) == "Model: Polynomial2D\n" "Inputs: ('x', 'y')\n" "Outputs: ('z',)\n" "Model set size: 1\n" "Degree: 2\n" "Parameters:\n" " c0_0 c1_0 c2_0 c0_1 c0_2 c1_1\n" " ---- ---- ---- ---- ---- ----\n" " 0.0 0.0 0.0 0.0 0.0 0.0" ) elif model_class.__name__ == "Linear1D": assert repr(m) == "<Linear1D(slope=2., intercept=0.)>" assert ( str(m) == "Model: Linear1D\n" "Inputs: ('x',)\n" "Outputs: ('y',)\n" "Model set size: 1\n" "Parameters:\n" " slope intercept\n" " ----- ---------\n" " 2.0 0.0" ) else: assert repr(m) == f"<{model_class.__name__}(2, c0=0., c1=0., c2=0.)>" assert ( str(m) == f"Model: {model_class.__name__}\n" "Inputs: ('x',)\n" "Outputs: ('y',)\n" "Model set size: 1\n" "Degree: 2\n" "Parameters:\n" " c0 c1 c2\n" " --- --- ---\n" " 0.0 0.0 0.0" ) def test_sip_hst(): """Test SIP against astropy.wcs""" test_file = get_pkg_data_filename(os.path.join("data", "hst_sip.hdr")) hdr = fits.Header.fromtextfile(test_file) crpix1 = hdr["CRPIX1"] crpix2 = hdr["CRPIX2"] wobj = wcs.WCS(hdr) a_pars = dict(**hdr["A_*"]) b_pars = dict(**hdr["B_*"]) a_order = a_pars.pop("A_ORDER") b_order = b_pars.pop("B_ORDER") sip = SIP([crpix1, crpix2], a_order, b_order, a_pars, b_pars) coords = [1, 1] rel_coords = [1 - crpix1, 1 - crpix2] astwcs_result = wobj.sip_pix2foc([coords], 1)[0] - rel_coords assert_allclose(sip(1, 1), astwcs_result) # Test changing of inputs and calling it with keyword argumenrts. 
sip.inputs = ("r", "t") assert_allclose(sip(r=1, t=1), astwcs_result) assert_allclose(sip(1, t=1), astwcs_result) # Test representations assert ( repr(sip) == "<SIP([<Shift(offset=-2048.)>, <Shift(offset=-1024.)>, " "<_SIP1D(4, 'A', A_2_0=0.00000855, A_3_0=-0., A_4_0=0., A_0_2=0.00000217, " "A_0_3=0., A_0_4=0., A_1_1=-0.0000052, A_1_2=-0., A_1_3=-0., " "A_2_1=-0., A_2_2=0., A_3_1=0.)>, " "<_SIP1D(4, 'B', B_2_0=-0.00000175, B_3_0=0., B_4_0=-0., B_0_2=-0.00000722, " "B_0_3=-0., B_0_4=-0., B_1_1=0.00000618, B_1_2=-0., B_1_3=0., " "B_2_1=-0., B_2_2=-0., B_3_1=-0.)>])>" ) with conf.set_temp("max_width", 80): # fmt: off assert str(sip) == ( "Model: SIP\n" " Model: Shift\n" " Inputs: ('x',)\n" " Outputs: ('y',)\n" " Model set size: 1\n" " Parameters:\n" " offset\n" " -------\n" " -2048.0\n" "\n" " Model: Shift\n" " Inputs: ('x',)\n" " Outputs: ('y',)\n" " Model set size: 1\n" " Parameters:\n" " offset\n" " -------\n" " -1024.0\n" "\n" " Model: _SIP1D\n" " Inputs: ('x', 'y')\n" " Outputs: ('z',)\n" " Model set size: 1\n" " Order: 4\n" " Coeff. Prefix: A\n" " Parameters:\n" " A_2_0 A_3_0 ... A_3_1 \n" " --------------------- ---------------------- ... ---------------------\n" " 8.551277582556502e-06 -4.730444829222791e-10 ... 1.971022971660309e-15\n" "\n" " Model: _SIP1D\n" " Inputs: ('x', 'y')\n" " Outputs: ('z',)\n" " Model set size: 1\n" " Order: 4\n" " Coeff. Prefix: B\n" " Parameters:\n" " B_2_0 B_3_0 ... B_3_1 \n" " ---------------------- --------------------- ... ----------------------\n" " -1.746491877058669e-06 8.567635427816317e-11 ... -3.779506805487476e-15\n" ) # fmt: on # Test get num of coeffs assert sip.sip1d_a.get_num_coeff(1) == 6 # Test error MESSAGE = "Degree of polynomial must be 2< deg < 9" sip.sip1d_a.order = 1 with pytest.raises(ValueError, match=MESSAGE): sip.sip1d_a.get_num_coeff(1) sip.sip1d_a.order = 10 with pytest.raises(ValueError, match=MESSAGE): sip.sip1d_a.get_num_coeff(1) def test_sip_irac(): """Test forward and inverse SIP against astropy.wcs""" test_file = get_pkg_data_filename(os.path.join("data", "irac_sip.hdr")) hdr = fits.Header.fromtextfile(test_file) crpix1 = hdr["CRPIX1"] crpix2 = hdr["CRPIX2"] wobj = wcs.WCS(hdr) a_pars = dict(**hdr["A_*"]) b_pars = dict(**hdr["B_*"]) ap_pars = dict(**hdr["AP_*"]) bp_pars = dict(**hdr["BP_*"]) a_order = a_pars.pop("A_ORDER") b_order = b_pars.pop("B_ORDER") ap_order = ap_pars.pop("AP_ORDER") bp_order = bp_pars.pop("BP_ORDER") del a_pars["A_DMAX"] del b_pars["B_DMAX"] pix = [200, 200] rel_pix = [200 - crpix1, 200 - crpix2] sip = SIP( [crpix1, crpix2], a_order, b_order, a_pars, b_pars, ap_order=ap_order, ap_coeff=ap_pars, bp_order=bp_order, bp_coeff=bp_pars, ) foc = wobj.sip_pix2foc([pix], 1) newpix = wobj.sip_foc2pix(foc, 1)[0] assert_allclose(sip(*pix), foc[0] - rel_pix) assert_allclose(sip.inverse(*foc[0]) + foc[0] - rel_pix, newpix - pix) # Test inverse representations assert ( repr(sip.inverse) == "<InverseSIP([<Polynomial2D(2, c0_0=0., c1_0=0.0000114, c2_0=0.00002353, " "c0_1=-0.00000546, c0_2=-0.00000667, c1_1=-0.00001801)>, " "<Polynomial2D(2, c0_0=0., c1_0=-0.00001495, c2_0=0.00000122, c0_1=0.00001975, " "c0_2=-0.00002601, c1_1=0.00002944)>])>" ) assert ( str(sip.inverse) == "Model: InverseSIP\n" " Model: Polynomial2D\n" " Inputs: ('x', 'y')\n" " Outputs: ('z',)\n" " Model set size: 1\n" " Degree: 2\n" " Parameters:\n" " c0_0 c1_0 c2_0 c0_1 c0_2 c1_1 \n" " ---- -------- --------- ---------- ---------- ----------\n" " 0.0 1.14e-05 2.353e-05 -5.463e-06 -6.666e-06 -1.801e-05\n" "\n" " Model: Polynomial2D\n" " Inputs: 
('x', 'y')\n" " Outputs: ('z',)\n" " Model set size: 1\n" " Degree: 2\n" " Parameters:\n" " c0_0 c1_0 c2_0 c0_1 c0_2 c1_1 \n" " ---- ---------- --------- --------- ---------- ---------\n" " 0.0 -1.495e-05 1.225e-06 1.975e-05 -2.601e-05 2.944e-05\n" ) def test_sip_no_coeff(): sip = SIP([10, 12], 2, 2) assert_allclose(sip.sip1d_a.parameters, [0.0, 0.0, 0.0]) assert_allclose(sip.sip1d_b.parameters, [0.0, 0.0, 0.0]) MESSAGE = r"SIP inverse coefficients are not available" with pytest.raises(NotImplementedError, match=MESSAGE): sip.inverse # Test model set sip = SIP([10, 12], 2, 2, n_models=2) assert sip.sip1d_a.model_set_axis == 0 assert sip.sip1d_b.model_set_axis == 0 @pytest.mark.parametrize( "cls", (Polynomial1D, Chebyshev1D, Legendre1D, Polynomial2D, Chebyshev2D, Legendre2D), ) def test_zero_degree_polynomial(cls): """ A few tests that degree=0 polynomials are correctly evaluated and fitted. Regression test for https://github.com/astropy/astropy/pull/3589 """ MESSAGE = "Degree of polynomial must be positive or null" if cls.n_inputs == 1: # Test 1D polynomials p1 = cls(degree=0, c0=1) assert p1(0) == 1 assert np.all(p1(np.zeros(5)) == np.ones(5)) x = np.linspace(0, 1, 100) # Add a little noise along a straight line y = 1 + np.random.uniform(0, 0.1, len(x)) p1_init = cls(degree=0) fitter = fitting.LinearLSQFitter() p1_fit = fitter(p1_init, x, y) # The fit won't be exact of course, but it should get close to within # 10% assert_allclose(p1_fit.c0, 1, atol=0.10) # Error from negative degree with pytest.raises(ValueError, match=MESSAGE): cls(degree=-1) elif cls.n_inputs == 2: # Test 2D polynomials if issubclass(cls, OrthoPolynomialBase): p2 = cls(x_degree=0, y_degree=0, c0_0=1) # different shaped x and y inputs a = np.array([1, 2, 3]) b = np.array([1, 2]) with mk.patch.object( PolynomialBase, "prepare_inputs", autospec=True, return_value=((a, b), mk.MagicMock()), ): with pytest.raises( ValueError, match=r"Expected input arrays to have the same shape" ): p2.prepare_inputs(mk.MagicMock(), mk.MagicMock()) # Error from negative degree with pytest.raises(ValueError, match=MESSAGE): cls(x_degree=-1, y_degree=0) with pytest.raises(ValueError, match=MESSAGE): cls(x_degree=0, y_degree=-1) else: p2 = cls(degree=0, c0_0=1) # Error from negative degree with pytest.raises(ValueError, match=MESSAGE): cls(degree=-1) assert p2(0, 0) == 1 assert np.all(p2(np.zeros(5), np.zeros(5)) == np.ones(5)) y, x = np.mgrid[0:1:100j, 0:1:100j] z = (1 + np.random.uniform(0, 0.1, x.size)).reshape(100, 100) if issubclass(cls, OrthoPolynomialBase): p2_init = cls(x_degree=0, y_degree=0) else: p2_init = cls(degree=0) fitter = fitting.LinearLSQFitter() p2_fit = fitter(p2_init, x, y, z) assert_allclose(p2_fit.c0_0, 1, atol=0.10) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.parametrize("fitter", fitters) def test_2d_orthopolynomial_in_compound_model(fitter): """ Ensure that OrthoPolynomialBase (i.e. Chebyshev2D & Legendre2D) models get evaluated & fitted correctly when part of a compound model. Regression test for https://github.com/astropy/astropy/pull/6085.
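    The compound model is marked ``fittable`` and ``linear`` by hand below
    before it is handed to the fitter.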
""" fitter = fitter() y, x = np.mgrid[0:5, 0:5] z = x + y simple_model = Chebyshev2D(2, 2) with pytest.warns(AstropyUserWarning, match="Model is linear in parameters"): simple_fit = fitter(simple_model, x, y, z) compound_model = Identity(2) | Chebyshev2D(2, 2) compound_model.fittable = True compound_model.linear = True with pytest.warns(AstropyUserWarning, match="Model is linear in parameters"): compound_fit = fitter(compound_model, x, y, z) assert_allclose(simple_fit(x, y), compound_fit(x, y), atol=1e-11) def test_Hermite1D_clenshaw(): model = Hermite1D(degree=2) assert model.clenshaw(1, [3]) == 3 assert model.clenshaw(1, [3, 4]) == 11 assert model.clenshaw(1, [3, 4, 5]) == 21 assert model.clenshaw(1, [3, 4, 5, 6]) == -3 def test__fcache(): model = OrthoPolynomialBase(x_degree=2, y_degree=2) MESSAGE = r"Subclasses should implement this" with pytest.raises(NotImplementedError, match=MESSAGE): model._fcache(np.asanyarray(1), np.asanyarray(1)) model = Hermite2D(x_degree=2, y_degree=2) assert model._fcache(np.asanyarray(1), np.asanyarray(1)) == { 0: np.asanyarray(1), 1: 2, 3: np.asanyarray(1), 4: 2, 2: 2.0, 5: -4.0, } model = Legendre2D(x_degree=2, y_degree=2) assert model._fcache(np.asanyarray(1), np.asanyarray(1)) == { 0: np.asanyarray(1), 1: np.asanyarray(1), 2: 1.0, 3: np.asanyarray(1), 4: np.asanyarray(1), 5: 1.0, } model = Chebyshev2D(x_degree=2, y_degree=2) assert model._fcache(np.asanyarray(1), np.asanyarray(1)) == { 0: np.asanyarray(1), 1: np.asanyarray(1), 2: 1.0, 3: np.asanyarray(1), 4: np.asanyarray(1), 5: 1.0, } def test_fit_deriv_shape_error(): model = Hermite2D(x_degree=2, y_degree=2) MESSAGE = r"x and y must have the same shape" with pytest.raises(ValueError, match=MESSAGE): model.fit_deriv(np.array([1, 2]), np.array([3, 4, 5])) model = Chebyshev2D(x_degree=2, y_degree=2) with pytest.raises(ValueError, match=MESSAGE): model.fit_deriv(np.array([1, 2]), np.array([3, 4, 5])) model = Legendre2D(x_degree=2, y_degree=2) with pytest.raises(ValueError, match=MESSAGE): model.fit_deriv(np.array([1, 2]), np.array([3, 4, 5])) model = Polynomial2D(degree=2) MESSAGE = r"Expected x and y to be of equal size" with pytest.raises(ValueError, match=MESSAGE): model.fit_deriv(np.array([1, 2]), np.array([3, 4, 5]))
ad563a25fbe4e0e7af8a38f40edb8bf4cd6e7b40f8db61c10445b9f2eddc9558
# Licensed under a 3-clause BSD style license - see LICENSE.rst import unittest.mock as mk import numpy as np import pytest import astropy.units as u from astropy.coordinates import SpectralCoord from astropy.modeling.bounding_box import ( CompoundBoundingBox, ModelBoundingBox, _BaseInterval, _BaseSelectorArgument, _BoundingDomain, _ignored_interval, _Interval, _SelectorArgument, _SelectorArguments, ) from astropy.modeling.core import Model, fix_inputs from astropy.modeling.models import ( Gaussian1D, Gaussian2D, Identity, Polynomial2D, Scale, Shift, ) class Test_Interval: def test_create(self): lower = mk.MagicMock() upper = mk.MagicMock() interval = _Interval(lower, upper) assert isinstance(interval, _BaseInterval) assert interval.lower == lower assert interval.upper == upper assert interval == (lower, upper) assert interval.__repr__() == f"Interval(lower={lower}, upper={upper})" def test_copy(self): interval = _Interval(0.5, 1.5) copy = interval.copy() assert interval == copy assert id(interval) != id(copy) # Same float values will have the same id assert interval.lower == copy.lower assert id(interval.lower) == id(copy.lower) # Same float values will have the same id assert interval.upper == copy.upper assert id(interval.upper) == id(copy.upper) def test__validate_shape(self): MESSAGE = r"An interval must be some sort of sequence of length 2" lower = mk.MagicMock() upper = mk.MagicMock() interval = _Interval(lower, upper) # Passes (2,) interval._validate_shape((1, 2)) interval._validate_shape([1, 2]) interval._validate_shape((1 * u.m, 2 * u.m)) interval._validate_shape([1 * u.m, 2 * u.m]) # Passes (1, 2) interval._validate_shape(((1, 2),)) interval._validate_shape(([1, 2],)) interval._validate_shape([(1, 2)]) interval._validate_shape([[1, 2]]) interval._validate_shape(((1 * u.m, 2 * u.m),)) interval._validate_shape(([1 * u.m, 2 * u.m],)) interval._validate_shape([(1 * u.m, 2 * u.m)]) interval._validate_shape([[1 * u.m, 2 * u.m]]) # Passes (2, 0) interval._validate_shape((mk.MagicMock(), mk.MagicMock())) interval._validate_shape([mk.MagicMock(), mk.MagicMock()]) # Passes with array inputs: interval._validate_shape((np.array([-2.5, -3.5]), np.array([2.5, 3.5]))) interval._validate_shape( (np.array([-2.5, -3.5, -4.5]), np.array([2.5, 3.5, 4.5])) ) # Fails shape (no units) with pytest.raises(ValueError, match=MESSAGE): interval._validate_shape((1, 2, 3)) with pytest.raises(ValueError, match=MESSAGE): interval._validate_shape([1, 2, 3]) with pytest.raises(ValueError, match=MESSAGE): interval._validate_shape([[1, 2, 3], [4, 5, 6]]) with pytest.raises(ValueError, match=MESSAGE): interval._validate_shape(1) # Fails shape (units) with pytest.raises(ValueError, match=MESSAGE): interval._validate_shape((1 * u.m, 2 * u.m, 3 * u.m)) with pytest.raises(ValueError, match=MESSAGE): interval._validate_shape([1 * u.m, 2 * u.m, 3 * u.m]) with pytest.raises(ValueError, match=MESSAGE): interval._validate_shape( [[1 * u.m, 2 * u.m, 3 * u.m], [4 * u.m, 5 * u.m, 6 * u.m]] ) with pytest.raises(ValueError, match=MESSAGE): interval._validate_shape(1 * u.m) # Fails shape (arrays): with pytest.raises(ValueError, match=MESSAGE): interval._validate_shape( (np.array([-2.5, -3.5]), np.array([2.5, 3.5]), np.array([3, 4])) ) with pytest.raises(ValueError, match=MESSAGE): interval._validate_shape((np.array([-2.5, -3.5]), [2.5, 3.5])) def test__validate_bounds(self): # Passes assert _Interval._validate_bounds(1, 2) == (1, 2) assert _Interval._validate_bounds(1 * u.m, 2 * u.m) == (1 * u.m, 2 * u.m) interval =
_Interval._validate_bounds( np.array([-2.5, -3.5]), np.array([2.5, 3.5]) ) assert (interval.lower == np.array([-2.5, -3.5])).all() assert (interval.upper == np.array([2.5, 3.5])).all() # Fails with pytest.warns( RuntimeWarning, match=r"Invalid interval: upper bound 1 is strictly " r"less than lower bound 2\.", ): _Interval._validate_bounds(2, 1) with pytest.warns( RuntimeWarning, match=r"Invalid interval: upper bound 1\.0 m is strictly " r"less than lower bound 2\.0 m\.", ): _Interval._validate_bounds(2 * u.m, 1 * u.m) def test_validate(self): # Passes assert _Interval.validate((1, 2)) == (1, 2) assert _Interval.validate([1, 2]) == (1, 2) assert _Interval.validate((1 * u.m, 2 * u.m)) == (1 * u.m, 2 * u.m) assert _Interval.validate([1 * u.m, 2 * u.m]) == (1 * u.m, 2 * u.m) assert _Interval.validate(((1, 2),)) == (1, 2) assert _Interval.validate(([1, 2],)) == (1, 2) assert _Interval.validate([(1, 2)]) == (1, 2) assert _Interval.validate([[1, 2]]) == (1, 2) assert _Interval.validate(((1 * u.m, 2 * u.m),)) == (1 * u.m, 2 * u.m) assert _Interval.validate(([1 * u.m, 2 * u.m],)) == (1 * u.m, 2 * u.m) assert _Interval.validate([(1 * u.m, 2 * u.m)]) == (1 * u.m, 2 * u.m) assert _Interval.validate([[1 * u.m, 2 * u.m]]) == (1 * u.m, 2 * u.m) interval = _Interval.validate((np.array([-2.5, -3.5]), np.array([2.5, 3.5]))) assert (interval.lower == np.array([-2.5, -3.5])).all() assert (interval.upper == np.array([2.5, 3.5])).all() interval = _Interval.validate( (np.array([-2.5, -3.5, -4.5]), np.array([2.5, 3.5, 4.5])) ) assert (interval.lower == np.array([-2.5, -3.5, -4.5])).all() assert (interval.upper == np.array([2.5, 3.5, 4.5])).all() # Fail shape MESSAGE = r"An interval must be some sort of sequence of length 2" with pytest.raises(ValueError, match=MESSAGE): _Interval.validate((1, 2, 3)) # Fail bounds with pytest.warns(RuntimeWarning): _Interval.validate((2, 1)) def test_outside(self): interval = _Interval.validate((0, 1)) # fmt: off assert ( interval.outside(np.linspace(-1, 2, 13)) == [ True, True, True, True, False, False, False, False, False, True, True, True, True ] ).all() # fmt: on def test_domain(self): interval = _Interval.validate((0, 1)) assert (interval.domain(0.25) == np.linspace(0, 1, 5)).all() def test__ignored_interval(self): assert _ignored_interval.lower == -np.inf assert _ignored_interval.upper == np.inf for num in [0, -1, -100, 3.14, 10**100, -(10**100)]: assert not num < _ignored_interval[0] assert num > _ignored_interval[0] assert not num > _ignored_interval[1] assert num < _ignored_interval[1] assert not (_ignored_interval.outside(np.array([num]))).all() def test_validate_with_SpectralCoord(self): """Regression test for issue #12439""" lower = SpectralCoord(1, u.um) upper = SpectralCoord(10, u.um) interval = _Interval.validate((lower, upper)) assert interval.lower == lower assert interval.upper == upper class Test_BoundingDomain: def setup_method(self): class BoundingDomain(_BoundingDomain): def fix_inputs(self, model, fix_inputs): super().fix_inputs(model, fixed_inputs=fix_inputs) def prepare_inputs(self, input_shape, inputs): super().prepare_inputs(input_shape, inputs) self.BoundingDomain = BoundingDomain def test_create(self): model = mk.MagicMock() bounding_box = self.BoundingDomain(model) assert bounding_box._model == model assert bounding_box._ignored == [] assert bounding_box._order == "C" bounding_box = self.BoundingDomain(model, order="F") assert bounding_box._model == model assert bounding_box._ignored == [] assert bounding_box._order == "F" bounding_box = 
self.BoundingDomain(Gaussian2D(), ["x"]) assert bounding_box._ignored == [0] assert bounding_box._order == "C" # Error MESSAGE = r"order must be either 'C' .* or 'F' .*, got: .*" with pytest.raises(ValueError, match=MESSAGE): self.BoundingDomain(model, order=mk.MagicMock()) def test_model(self): model = mk.MagicMock() bounding_box = self.BoundingDomain(model) assert bounding_box._model == model assert bounding_box.model == model def test_order(self): bounding_box = self.BoundingDomain(mk.MagicMock(), order="C") assert bounding_box._order == "C" assert bounding_box.order == "C" bounding_box = self.BoundingDomain(mk.MagicMock(), order="F") assert bounding_box._order == "F" assert bounding_box.order == "F" bounding_box._order = "test" assert bounding_box.order == "test" def test_ignored(self): ignored = [0] model = mk.MagicMock() model.n_inputs = 1 model.inputs = ["x"] bounding_box = self.BoundingDomain(model, ignored=ignored) assert bounding_box._ignored == ignored assert bounding_box.ignored == ignored def test__get_order(self): bounding_box = self.BoundingDomain(mk.MagicMock()) # Success (default 'C') assert bounding_box._order == "C" assert bounding_box._get_order() == "C" assert bounding_box._get_order("C") == "C" assert bounding_box._get_order("F") == "F" # Success (default 'F') bounding_box._order = "F" assert bounding_box._order == "F" assert bounding_box._get_order() == "F" assert bounding_box._get_order("C") == "C" assert bounding_box._get_order("F") == "F" # Error MESSAGE = r"order must be either 'C' .* or 'F' .*, got: .*" with pytest.raises(ValueError, match=MESSAGE): bounding_box._get_order(mk.MagicMock()) def test__get_index(self): bounding_box = self.BoundingDomain(Gaussian2D()) # Pass input name assert bounding_box._get_index("x") == 0 assert bounding_box._get_index("y") == 1 # Pass invalid input name MESSAGE = r"'z' is not one of the inputs: .*" with pytest.raises(ValueError, match=MESSAGE): bounding_box._get_index("z") # Pass valid index assert bounding_box._get_index(0) == 0 assert bounding_box._get_index(1) == 1 assert bounding_box._get_index(np.int32(0)) == 0 assert bounding_box._get_index(np.int32(1)) == 1 assert bounding_box._get_index(np.int64(0)) == 0 assert bounding_box._get_index(np.int64(1)) == 1 # Pass invalid index MESSAGE = r"Integer key: .* must be non-negative and < 2" with pytest.raises(IndexError, match=MESSAGE): bounding_box._get_index(2) with pytest.raises(IndexError, match=MESSAGE): bounding_box._get_index(np.int32(2)) with pytest.raises(IndexError, match=MESSAGE): bounding_box._get_index(np.int64(2)) with pytest.raises(IndexError, match=MESSAGE): bounding_box._get_index(-1) # Pass invalid key MESSAGE = r"Key value: .* must be string or integer" with pytest.raises(ValueError, match=MESSAGE): bounding_box._get_index(mk.MagicMock()) def test__get_name(self): model = mk.MagicMock() model.n_inputs = 1 model.inputs = ["x"] bounding_box = self.BoundingDomain(model) index = mk.MagicMock() name = mk.MagicMock() model.inputs = mk.MagicMock() model.inputs.__getitem__.return_value = name assert bounding_box._get_name(index) == name assert model.inputs.__getitem__.call_args_list == [mk.call(index)] def test_ignored_inputs(self): model = mk.MagicMock() ignored = list(range(4, 8)) model.n_inputs = 8 model.inputs = [mk.MagicMock() for _ in range(8)] bounding_box = self.BoundingDomain(model, ignored=ignored) inputs = bounding_box.ignored_inputs assert isinstance(inputs, list) for index, _input in enumerate(inputs): assert _input in model.inputs assert model.inputs[index 
+ 4] == _input for index, _input in enumerate(model.inputs): if _input in inputs: assert inputs[index - 4] == _input else: assert index < 4 def test__validate_ignored(self): bounding_box = self.BoundingDomain(Gaussian2D()) # Pass assert bounding_box._validate_ignored(None) == [] assert bounding_box._validate_ignored(["x", "y"]) == [0, 1] assert bounding_box._validate_ignored([0, 1]) == [0, 1] assert bounding_box._validate_ignored([np.int32(0), np.int64(1)]) == [0, 1] # Fail with pytest.raises( ValueError, match=r"Key value: .* must be string or integer" ): bounding_box._validate_ignored([mk.MagicMock()]) with pytest.raises(ValueError, match=r"'.*' is not one of the inputs: .*"): bounding_box._validate_ignored(["z"]) MESSAGE = r"Integer key: 3 must be non-negative and < 2" with pytest.raises(IndexError, match=MESSAGE): bounding_box._validate_ignored([3]) with pytest.raises(IndexError, match=MESSAGE): bounding_box._validate_ignored([np.int32(3)]) with pytest.raises(IndexError, match=MESSAGE): bounding_box._validate_ignored([np.int64(3)]) def test___call__(self): bounding_box = self.BoundingDomain(mk.MagicMock()) args = tuple(mk.MagicMock() for _ in range(3)) kwargs = {f"test{idx}": mk.MagicMock() for idx in range(3)} MESSAGE = ( r"This bounding box is fixed by the model and does not have adjustable" r" parameters" ) with pytest.raises(RuntimeError, match=MESSAGE): bounding_box(*args, **kwargs) def test_fix_inputs(self): bounding_box = self.BoundingDomain(mk.MagicMock()) model = mk.MagicMock() fixed_inputs = mk.MagicMock() with pytest.raises( NotImplementedError, match=r"This should be implemented by a child class" ): bounding_box.fix_inputs(model, fixed_inputs) def test__prepare_inputs(self): bounding_box = self.BoundingDomain(mk.MagicMock()) with pytest.raises( NotImplementedError, match=r"This has not been implemented for BoundingDomain", ): bounding_box.prepare_inputs(mk.MagicMock(), mk.MagicMock()) def test__base_output(self): bounding_box = self.BoundingDomain(mk.MagicMock()) # Simple shape input_shape = (13,) output = bounding_box._base_output(input_shape, 0) assert (output == 0).all() assert output.shape == input_shape output = bounding_box._base_output(input_shape, np.nan) assert (np.isnan(output)).all() assert output.shape == input_shape output = bounding_box._base_output(input_shape, 14) assert (output == 14).all() assert output.shape == input_shape # Complex shape input_shape = (13, 7) output = bounding_box._base_output(input_shape, 0) assert (output == 0).all() assert output.shape == input_shape output = bounding_box._base_output(input_shape, np.nan) assert (np.isnan(output)).all() assert output.shape == input_shape output = bounding_box._base_output(input_shape, 14) assert (output == 14).all() assert output.shape == input_shape def test__all_out_output(self): model = mk.MagicMock() bounding_box = self.BoundingDomain(model) # Simple shape model.n_outputs = 1 input_shape = (13,) output, output_unit = bounding_box._all_out_output(input_shape, 0) assert (np.array(output) == 0).all() assert np.array(output).shape == (1, 13) assert output_unit is None # Complex shape model.n_outputs = 6 input_shape = (13, 7) output, output_unit = bounding_box._all_out_output(input_shape, 0) assert (np.array(output) == 0).all() assert np.array(output).shape == (6, 13, 7) assert output_unit is None def test__modify_output(self): bounding_box = self.BoundingDomain(mk.MagicMock()) valid_index = mk.MagicMock() input_shape = mk.MagicMock() fill_value = mk.MagicMock() # Simple shape with mk.patch.object(
_BoundingDomain, "_base_output", autospec=True, return_value=np.asanyarray(0), ) as mkBase: assert ( np.array([1, 2, 3]) == bounding_box._modify_output( [1, 2, 3], valid_index, input_shape, fill_value ) ).all() assert mkBase.call_args_list == [mk.call(input_shape, fill_value)] # Replacement with mk.patch.object( _BoundingDomain, "_base_output", autospec=True, return_value=np.array([1, 2, 3, 4, 5, 6]), ) as mkBase: assert ( np.array([7, 2, 8, 4, 9, 6]) == bounding_box._modify_output( [7, 8, 9], np.array([[0, 2, 4]]), input_shape, fill_value ) ).all() assert mkBase.call_args_list == [mk.call(input_shape, fill_value)] def test__prepare_outputs(self): bounding_box = self.BoundingDomain(mk.MagicMock()) valid_index = mk.MagicMock() input_shape = mk.MagicMock() fill_value = mk.MagicMock() valid_outputs = [mk.MagicMock() for _ in range(3)] effects = [mk.MagicMock() for _ in range(3)] with mk.patch.object( _BoundingDomain, "_modify_output", autospec=True, side_effect=effects ) as mkModify: assert effects == bounding_box._prepare_outputs( valid_outputs, valid_index, input_shape, fill_value ) assert mkModify.call_args_list == [ mk.call( bounding_box, valid_outputs[idx], valid_index, input_shape, fill_value, ) for idx in range(3) ] def test_prepare_outputs(self): model = mk.MagicMock() bounding_box = self.BoundingDomain(model) valid_outputs = mk.MagicMock() valid_index = mk.MagicMock() input_shape = mk.MagicMock() fill_value = mk.MagicMock() with mk.patch.object( _BoundingDomain, "_prepare_outputs", autospec=True ) as mkPrepare: # Reshape valid_outputs model.n_outputs = 1 assert mkPrepare.return_value == bounding_box.prepare_outputs( valid_outputs, valid_index, input_shape, fill_value ) assert mkPrepare.call_args_list == [ mk.call( bounding_box, [valid_outputs], valid_index, input_shape, fill_value ) ] mkPrepare.reset_mock() # No reshape valid_outputs model.n_outputs = 2 assert mkPrepare.return_value == bounding_box.prepare_outputs( valid_outputs, valid_index, input_shape, fill_value ) assert mkPrepare.call_args_list == [ mk.call( bounding_box, valid_outputs, valid_index, input_shape, fill_value ) ] def test__get_valid_outputs_unit(self): bounding_box = self.BoundingDomain(mk.MagicMock()) # Don't get unit assert bounding_box._get_valid_outputs_unit(mk.MagicMock(), False) is None # Get unit from unitless assert bounding_box._get_valid_outputs_unit(7, True) is None # Get unit assert bounding_box._get_valid_outputs_unit(25 * u.m, True) == u.m def test__evaluate_model(self): bounding_box = self.BoundingDomain(mk.MagicMock()) evaluate = mk.MagicMock() valid_inputs = mk.MagicMock() input_shape = mk.MagicMock() valid_index = mk.MagicMock() fill_value = mk.MagicMock() with_units = mk.MagicMock() with mk.patch.object( _BoundingDomain, "_get_valid_outputs_unit", autospec=True ) as mkGet: with mk.patch.object( _BoundingDomain, "prepare_outputs", autospec=True ) as mkPrepare: assert bounding_box._evaluate_model( evaluate, valid_inputs, valid_index, input_shape, fill_value, with_units, ) == (mkPrepare.return_value, mkGet.return_value) assert mkPrepare.call_args_list == [ mk.call( bounding_box, evaluate.return_value, valid_index, input_shape, fill_value, ) ] assert mkGet.call_args_list == [ mk.call(evaluate.return_value, with_units) ] assert evaluate.call_args_list == [mk.call(valid_inputs)] def test__evaluate(self): bounding_box = self.BoundingDomain(mk.MagicMock()) evaluate = mk.MagicMock() inputs = mk.MagicMock() input_shape = mk.MagicMock() fill_value = mk.MagicMock() with_units = mk.MagicMock() valid_inputs = 
mk.MagicMock() valid_index = mk.MagicMock() effects = [ (valid_inputs, valid_index, True), (valid_inputs, valid_index, False), ] with mk.patch.object( self.BoundingDomain, "prepare_inputs", autospec=True, side_effect=effects ) as mkPrepare: with mk.patch.object( _BoundingDomain, "_all_out_output", autospec=True ) as mkAll: with mk.patch.object( _BoundingDomain, "_evaluate_model", autospec=True ) as mkEvaluate: # all_out assert ( bounding_box._evaluate( evaluate, inputs, input_shape, fill_value, with_units ) == mkAll.return_value ) assert mkAll.call_args_list == [ mk.call(bounding_box, input_shape, fill_value) ] assert mkEvaluate.call_args_list == [] assert mkPrepare.call_args_list == [ mk.call(bounding_box, input_shape, inputs) ] mkAll.reset_mock() mkPrepare.reset_mock() # not all_out assert ( bounding_box._evaluate( evaluate, inputs, input_shape, fill_value, with_units ) == mkEvaluate.return_value ) assert mkAll.call_args_list == [] assert mkEvaluate.call_args_list == [ mk.call( bounding_box, evaluate, valid_inputs, valid_index, input_shape, fill_value, with_units, ) ] assert mkPrepare.call_args_list == [ mk.call(bounding_box, input_shape, inputs) ] def test__set_outputs_unit(self): bounding_box = self.BoundingDomain(mk.MagicMock()) # set no unit assert 27 == bounding_box._set_outputs_unit(27, None) # set unit assert 27 * u.m == bounding_box._set_outputs_unit(27, u.m) def test_evaluate(self): bounding_box = self.BoundingDomain(Gaussian2D()) evaluate = mk.MagicMock() inputs = mk.MagicMock() fill_value = mk.MagicMock() outputs = mk.MagicMock() valid_outputs_unit = mk.MagicMock() value = (outputs, valid_outputs_unit) with mk.patch.object( _BoundingDomain, "_evaluate", autospec=True, return_value=value ) as mkEvaluate: with mk.patch.object( _BoundingDomain, "_set_outputs_unit", autospec=True ) as mkSet: with mk.patch.object(Model, "input_shape", autospec=True) as mkShape: with mk.patch.object( Model, "bbox_with_units", new_callable=mk.PropertyMock ) as mkUnits: assert tuple(mkSet.return_value) == bounding_box.evaluate( evaluate, inputs, fill_value ) assert mkSet.call_args_list == [ mk.call(outputs, valid_outputs_unit) ] assert mkEvaluate.call_args_list == [ mk.call( bounding_box, evaluate, inputs, mkShape.return_value, fill_value, mkUnits.return_value, ) ] assert mkShape.call_args_list == [ mk.call(bounding_box._model, inputs) ] assert mkUnits.call_args_list == [mk.call()] class TestModelBoundingBox: def test_create(self): intervals = () model = mk.MagicMock() bounding_box = ModelBoundingBox(intervals, model) assert isinstance(bounding_box, _BoundingDomain) assert bounding_box._intervals == {} assert bounding_box._model == model assert bounding_box._ignored == [] assert bounding_box._order == "C" # Set optional intervals = {} model = mk.MagicMock() bounding_box = ModelBoundingBox(intervals, model, order="F") assert isinstance(bounding_box, _BoundingDomain) assert bounding_box._intervals == {} assert bounding_box._model == model assert bounding_box._ignored == [] assert bounding_box._order == "F" # Set interval intervals = (1, 2) model = mk.MagicMock() model.n_inputs = 1 model.inputs = ["x"] bounding_box = ModelBoundingBox(intervals, model) assert isinstance(bounding_box, _BoundingDomain) assert bounding_box._intervals == {0: (1, 2)} assert bounding_box._model == model # Set ignored intervals = (1, 2) model = mk.MagicMock() model.n_inputs = 2 model.inputs = ["x", "y"] bounding_box = ModelBoundingBox(intervals, model, ignored=[1]) assert isinstance(bounding_box, _BoundingDomain) assert 
bounding_box._intervals == {0: (1, 2)} assert bounding_box._model == model assert bounding_box._ignored == [1] intervals = ((1, 2), (3, 4)) model = mk.MagicMock() model.n_inputs = 3 model.inputs = ["x", "y", "z"] bounding_box = ModelBoundingBox(intervals, model, ignored=[2], order="F") assert isinstance(bounding_box, _BoundingDomain) assert bounding_box._intervals == {0: (1, 2), 1: (3, 4)} assert bounding_box._model == model assert bounding_box._ignored == [2] assert bounding_box._order == "F" def test_copy(self): bounding_box = ModelBoundingBox.validate( Gaussian2D(), ((-4.5, 4.5), (-1.4, 1.4)) ) copy = bounding_box.copy() assert bounding_box == copy assert id(bounding_box) != id(copy) assert bounding_box.ignored == copy.ignored assert id(bounding_box.ignored) != id(copy.ignored) # model is not copied to prevent infinite recursion assert bounding_box._model == copy._model assert id(bounding_box._model) == id(copy._model) # Same string values will have the same id assert bounding_box._order == copy._order assert id(bounding_box._order) == id(copy._order) # Check interval objects for index, interval in bounding_box.intervals.items(): assert interval == copy.intervals[index] assert id(interval) != id(copy.intervals[index]) # Same float values will have the same id assert interval.lower == copy.intervals[index].lower assert id(interval.lower) == id(copy.intervals[index].lower) # Same float values will have the same id assert interval.upper == copy.intervals[index].upper assert id(interval.upper) == id(copy.intervals[index].upper) assert len(bounding_box.intervals) == len(copy.intervals) assert bounding_box.intervals.keys() == copy.intervals.keys() def test_intervals(self): intervals = {0: _Interval(1, 2)} model = mk.MagicMock() model.n_inputs = 1 model.inputs = ["x"] bounding_box = ModelBoundingBox(intervals, model) assert bounding_box._intervals == intervals assert bounding_box.intervals == intervals def test_named_intervals(self): intervals = {idx: _Interval(idx, idx + 1) for idx in range(4)} model = mk.MagicMock() model.n_inputs = 4 model.inputs = [mk.MagicMock() for _ in range(4)] bounding_box = ModelBoundingBox(intervals, model) named = bounding_box.named_intervals assert isinstance(named, dict) for name, interval in named.items(): assert name in model.inputs assert intervals[model.inputs.index(name)] == interval for index, name in enumerate(model.inputs): assert index in intervals assert name in named assert intervals[index] == named[name] def test___repr__(self): intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)} model = Gaussian2D() bounding_box = ModelBoundingBox.validate(model, intervals) assert ( bounding_box.__repr__() == "ModelBoundingBox(\n" " intervals={\n" " x: Interval(lower=-1, upper=1)\n" " y: Interval(lower=-4, upper=4)\n" " }\n" " model=Gaussian2D(inputs=('x', 'y'))\n" " order='C'\n" ")" ) intervals = {0: _Interval(-1, 1)} model = Gaussian2D() bounding_box = ModelBoundingBox.validate(model, intervals, ignored=["y"]) assert ( bounding_box.__repr__() == "ModelBoundingBox(\n" " intervals={\n" " x: Interval(lower=-1, upper=1)\n" " }\n" " ignored=['y']\n" " model=Gaussian2D(inputs=('x', 'y'))\n" " order='C'\n" ")" ) def test___len__(self): intervals = {0: _Interval(-1, 1)} model = Gaussian1D() bounding_box = ModelBoundingBox.validate(model, intervals) assert len(bounding_box) == 1 == len(bounding_box._intervals) intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)} model = Gaussian2D() bounding_box = ModelBoundingBox.validate(model, intervals) assert len(bounding_box)
== 2 == len(bounding_box._intervals) bounding_box._intervals = {} assert len(bounding_box) == 0 == len(bounding_box._intervals) def test___contains__(self): intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)} model = Gaussian2D() bounding_box = ModelBoundingBox.validate(model, intervals) # Contains with keys assert "x" in bounding_box assert "y" in bounding_box assert "z" not in bounding_box # Contains with index assert 0 in bounding_box assert 1 in bounding_box assert 2 not in bounding_box # General not in assert mk.MagicMock() not in bounding_box # Contains with ignored del bounding_box["y"] # Contains with keys assert "x" in bounding_box assert "y" in bounding_box assert "z" not in bounding_box # Contains with index assert 0 in bounding_box assert 1 in bounding_box assert 2 not in bounding_box def test___getitem__(self): intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)} model = Gaussian2D() bounding_box = ModelBoundingBox.validate(model, intervals) # Get using input key assert bounding_box["x"] == (-1, 1) assert bounding_box["y"] == (-4, 4) # Fail with input key with pytest.raises(ValueError, match=r"'.*' is not one of the inputs: .*"): bounding_box["z"] # Get using index assert bounding_box[0] == (-1, 1) assert bounding_box[1] == (-4, 4) assert bounding_box[np.int32(0)] == (-1, 1) assert bounding_box[np.int32(1)] == (-4, 4) assert bounding_box[np.int64(0)] == (-1, 1) assert bounding_box[np.int64(1)] == (-4, 4) # Fail with index MESSAGE = r"Integer key: 2 must be non-negative and < 2" with pytest.raises(IndexError, match=MESSAGE): bounding_box[2] with pytest.raises(IndexError, match=MESSAGE): bounding_box[np.int32(2)] with pytest.raises(IndexError, match=MESSAGE): bounding_box[np.int64(2)] # get ignored interval del bounding_box[0] assert bounding_box[0] == _ignored_interval assert bounding_box[1] == (-4, 4) del bounding_box[1] assert bounding_box[0] == _ignored_interval assert bounding_box[1] == _ignored_interval def test_bounding_box(self): # 0D model = Gaussian1D() bounding_box = ModelBoundingBox.validate(model, {}, ignored=["x"]) assert bounding_box.bounding_box() == (-np.inf, np.inf) assert bounding_box.bounding_box("C") == (-np.inf, np.inf) assert bounding_box.bounding_box("F") == (-np.inf, np.inf) # 1D intervals = {0: _Interval(-1, 1)} model = Gaussian1D() bounding_box = ModelBoundingBox.validate(model, intervals) assert bounding_box.bounding_box() == (-1, 1) assert bounding_box.bounding_box(mk.MagicMock()) == (-1, 1) # > 1D intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)} model = Gaussian2D() bounding_box = ModelBoundingBox.validate(model, intervals) assert bounding_box.bounding_box() == ((-4, 4), (-1, 1)) assert bounding_box.bounding_box("C") == ((-4, 4), (-1, 1)) assert bounding_box.bounding_box("F") == ((-1, 1), (-4, 4)) def test___eq__(self): intervals = {0: _Interval(-1, 1)} model = Gaussian1D() bounding_box = ModelBoundingBox.validate(model.copy(), intervals.copy()) assert bounding_box == bounding_box assert bounding_box == ModelBoundingBox.validate(model.copy(), intervals.copy()) assert bounding_box == (-1, 1) assert not (bounding_box == mk.MagicMock()) assert not (bounding_box == (-2, 2)) assert not ( bounding_box == ModelBoundingBox.validate(model, {0: _Interval(-2, 2)}) ) # Respect ordering intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)} model = Gaussian2D() bounding_box_1 = ModelBoundingBox.validate(model, intervals) bounding_box_2 = ModelBoundingBox.validate(model, intervals, order="F") assert bounding_box_1._order == "C" assert bounding_box_1 == 
((-4, 4), (-1, 1)) assert not (bounding_box_1 == ((-1, 1), (-4, 4))) assert bounding_box_2._order == "F" assert not (bounding_box_2 == ((-4, 4), (-1, 1))) assert bounding_box_2 == ((-1, 1), (-4, 4)) assert bounding_box_1 == bounding_box_2 # Respect ignored model = Gaussian2D() bounding_box_1._ignored = [mk.MagicMock()] bounding_box_2._ignored = [mk.MagicMock()] assert bounding_box_1._ignored != bounding_box_2._ignored assert not (bounding_box_1 == bounding_box_2) def test__setitem__(self): model = Gaussian2D() bounding_box = ModelBoundingBox.validate(model, {}, ignored=[0, 1]) assert bounding_box._ignored == [0, 1] # USING Intervals directly # Set interval using key assert 0 not in bounding_box.intervals assert 0 in bounding_box.ignored bounding_box["x"] = _Interval(-1, 1) assert 0 in bounding_box.intervals assert 0 not in bounding_box.ignored assert isinstance(bounding_box["x"], _Interval) assert bounding_box["x"] == (-1, 1) assert 1 not in bounding_box.intervals assert 1 in bounding_box.ignored bounding_box["y"] = _Interval(-4, 4) assert 1 in bounding_box.intervals assert 1 not in bounding_box.ignored assert isinstance(bounding_box["y"], _Interval) assert bounding_box["y"] == (-4, 4) del bounding_box["x"] del bounding_box["y"] # Set interval using index assert 0 not in bounding_box.intervals assert 0 in bounding_box.ignored bounding_box[0] = _Interval(-1, 1) assert 0 in bounding_box.intervals assert 0 not in bounding_box.ignored assert isinstance(bounding_box[0], _Interval) assert bounding_box[0] == (-1, 1) assert 1 not in bounding_box.intervals assert 1 in bounding_box.ignored bounding_box[1] = _Interval(-4, 4) assert 1 in bounding_box.intervals assert 1 not in bounding_box.ignored assert isinstance(bounding_box[1], _Interval) assert bounding_box[1] == (-4, 4) del bounding_box[0] del bounding_box[1] # USING tuples # Set interval using key assert 0 not in bounding_box.intervals assert 0 in bounding_box.ignored bounding_box["x"] = (-1, 1) assert 0 in bounding_box.intervals assert 0 not in bounding_box.ignored assert isinstance(bounding_box["x"], _Interval) assert bounding_box["x"] == (-1, 1) assert 1 not in bounding_box.intervals assert 1 in bounding_box.ignored bounding_box["y"] = (-4, 4) assert 1 in bounding_box.intervals assert 1 not in bounding_box.ignored assert isinstance(bounding_box["y"], _Interval) assert bounding_box["y"] == (-4, 4) del bounding_box["x"] del bounding_box["y"] # Set interval using index assert 0 not in bounding_box.intervals assert 0 in bounding_box.ignored bounding_box[0] = (-1, 1) assert 0 in bounding_box.intervals assert 0 not in bounding_box.ignored assert isinstance(bounding_box[0], _Interval) assert bounding_box[0] == (-1, 1) assert 1 not in bounding_box.intervals assert 1 in bounding_box.ignored bounding_box[1] = (-4, 4) assert 1 in bounding_box.intervals assert 1 not in bounding_box.ignored assert isinstance(bounding_box[1], _Interval) assert bounding_box[1] == (-4, 4) # Model set support model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2) bounding_box = ModelBoundingBox({}, model) # USING Intervals directly # Set interval using key assert "x" not in bounding_box bounding_box["x"] = _Interval(np.array([-1, -2]), np.array([1, 2])) assert "x" in bounding_box assert isinstance(bounding_box["x"], _Interval) assert (bounding_box["x"].lower == np.array([-1, -2])).all() assert (bounding_box["x"].upper == np.array([1, 2])).all() # Set interval using index bounding_box._intervals = {} assert 0 not in bounding_box bounding_box[0] = _Interval(np.array([-1, 
-2]), np.array([1, 2])) assert 0 in bounding_box assert isinstance(bounding_box[0], _Interval) assert (bounding_box[0].lower == np.array([-1, -2])).all() assert (bounding_box[0].upper == np.array([1, 2])).all() # USING tuples # Set interval using key bounding_box._intervals = {} assert "x" not in bounding_box bounding_box["x"] = (np.array([-1, -2]), np.array([1, 2])) assert "x" in bounding_box assert isinstance(bounding_box["x"], _Interval) assert (bounding_box["x"].lower == np.array([-1, -2])).all() assert (bounding_box["x"].upper == np.array([1, 2])).all() # Set interval using index bounding_box._intervals = {} assert 0 not in bounding_box bounding_box[0] = (np.array([-1, -2]), np.array([1, 2])) assert 0 in bounding_box assert isinstance(bounding_box[0], _Interval) assert (bounding_box[0].lower == np.array([-1, -2])).all() assert (bounding_box[0].upper == np.array([1, 2])).all() def test___delitem__(self): intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)} model = Gaussian2D() bounding_box = ModelBoundingBox.validate(model, intervals) # Using index assert 0 in bounding_box.intervals assert 0 not in bounding_box.ignored assert 0 in bounding_box assert "x" in bounding_box del bounding_box[0] assert 0 not in bounding_box.intervals assert 0 in bounding_box.ignored assert 0 in bounding_box assert "x" in bounding_box # Delete an ignored item with pytest.raises(RuntimeError, match=r"Cannot delete ignored input: 0!"): del bounding_box[0] # Using key assert 1 in bounding_box.intervals assert 1 not in bounding_box.ignored assert 0 in bounding_box assert "y" in bounding_box del bounding_box["y"] assert 1 not in bounding_box.intervals assert 1 in bounding_box.ignored assert 0 in bounding_box assert "y" in bounding_box # Delete an ignored item with pytest.raises(RuntimeError, match=r"Cannot delete ignored input: y!"): del bounding_box["y"] def test__validate_dict(self): model = Gaussian2D() bounding_box = ModelBoundingBox({}, model) # Input name keys intervals = {"x": _Interval(-1, 1), "y": _Interval(-4, 4)} assert "x" not in bounding_box assert "y" not in bounding_box bounding_box._validate_dict(intervals) assert "x" in bounding_box assert bounding_box["x"] == (-1, 1) assert "y" in bounding_box assert bounding_box["y"] == (-4, 4) assert len(bounding_box.intervals) == 2 # Input index bounding_box._intervals = {} intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)} assert 0 not in bounding_box assert 1 not in bounding_box bounding_box._validate_dict(intervals) assert 0 in bounding_box assert bounding_box[0] == (-1, 1) assert 1 in bounding_box assert bounding_box[1] == (-4, 4) assert len(bounding_box.intervals) == 2 # Model set support model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2) bounding_box = ModelBoundingBox({}, model) # name keys intervals = {"x": _Interval(np.array([-1, -2]), np.array([1, 2]))} assert "x" not in bounding_box bounding_box._validate_dict(intervals) assert "x" in bounding_box assert (bounding_box["x"].lower == np.array([-1, -2])).all() assert (bounding_box["x"].upper == np.array([1, 2])).all() # input index bounding_box._intervals = {} intervals = {0: _Interval(np.array([-1, -2]), np.array([1, 2]))} assert 0 not in bounding_box bounding_box._validate_dict(intervals) assert 0 in bounding_box assert (bounding_box[0].lower == np.array([-1, -2])).all() assert (bounding_box[0].upper == np.array([1, 2])).all() def test__validate_sequence(self): model = Gaussian2D() bounding_box = ModelBoundingBox({}, model) # Default order assert "x" not in bounding_box assert "y" not 
in bounding_box bounding_box._validate_sequence(((-4, 4), (-1, 1))) assert "x" in bounding_box assert bounding_box["x"] == (-1, 1) assert "y" in bounding_box assert bounding_box["y"] == (-4, 4) assert len(bounding_box.intervals) == 2 # C order bounding_box._intervals = {} assert "x" not in bounding_box assert "y" not in bounding_box bounding_box._validate_sequence(((-4, 4), (-1, 1)), order="C") assert "x" in bounding_box assert bounding_box["x"] == (-1, 1) assert "y" in bounding_box assert bounding_box["y"] == (-4, 4) assert len(bounding_box.intervals) == 2 # Fortran order bounding_box._intervals = {} assert "x" not in bounding_box assert "y" not in bounding_box bounding_box._validate_sequence(((-4, 4), (-1, 1)), order="F") assert "x" in bounding_box assert bounding_box["x"] == (-4, 4) assert "y" in bounding_box assert bounding_box["y"] == (-1, 1) assert len(bounding_box.intervals) == 2 # Invalid order bounding_box._intervals = {} assert "x" not in bounding_box assert "y" not in bounding_box MESSAGE = r"order must be either 'C' .* or 'F' .*, got: .*" with pytest.raises(ValueError, match=MESSAGE): bounding_box._validate_sequence(((-4, 4), (-1, 1)), order=mk.MagicMock()) assert "x" not in bounding_box assert "y" not in bounding_box assert len(bounding_box.intervals) == 0 def test__n_inputs(self): model = Gaussian2D() intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)} bounding_box = ModelBoundingBox.validate(model, intervals) assert bounding_box._n_inputs == 2 intervals = {0: _Interval(-1, 1)} bounding_box = ModelBoundingBox.validate(model, intervals, ignored=["y"]) assert bounding_box._n_inputs == 1 bounding_box = ModelBoundingBox.validate(model, {}, ignored=["x", "y"]) assert bounding_box._n_inputs == 0 bounding_box._ignored = ["x", "y", "z"] assert bounding_box._n_inputs == 0 def test__validate_iterable(self): model = Gaussian2D() bounding_box = ModelBoundingBox({}, model) # Pass sequence Default order assert "x" not in bounding_box assert "y" not in bounding_box bounding_box._validate_iterable(((-4, 4), (-1, 1))) assert "x" in bounding_box assert bounding_box["x"] == (-1, 1) assert "y" in bounding_box assert bounding_box["y"] == (-4, 4) assert len(bounding_box.intervals) == 2 # Pass sequence bounding_box._intervals = {} assert "x" not in bounding_box assert "y" not in bounding_box bounding_box._validate_iterable(((-4, 4), (-1, 1)), order="F") assert "x" in bounding_box assert bounding_box["x"] == (-4, 4) assert "y" in bounding_box assert bounding_box["y"] == (-1, 1) assert len(bounding_box.intervals) == 2 # Pass Dict bounding_box._intervals = {} intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)} assert 0 not in bounding_box assert 1 not in bounding_box bounding_box._validate_iterable(intervals) assert 0 in bounding_box assert bounding_box[0] == (-1, 1) assert 1 in bounding_box assert bounding_box[1] == (-4, 4) assert len(bounding_box.intervals) == 2 # Pass with ignored bounding_box._intervals = {} bounding_box._ignored = [1] intervals = {0: _Interval(-1, 1)} assert 0 not in bounding_box.intervals bounding_box._validate_iterable(intervals) assert 0 in bounding_box.intervals assert bounding_box[0] == (-1, 1) # Invalid iterable MESSAGE = "Found {} intervals, but must have exactly {}" bounding_box._intervals = {} bounding_box._ignored = [] assert "x" not in bounding_box assert "y" not in bounding_box with pytest.raises(ValueError, match=MESSAGE.format(3, 2)): bounding_box._validate_iterable(((-4, 4), (-1, 1), (-3, 3))) assert len(bounding_box.intervals) == 0 assert "x" not in 
bounding_box assert "y" not in bounding_box bounding_box._ignored = [1] intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)} with pytest.raises(ValueError, match=MESSAGE.format(2, 1)): bounding_box._validate_iterable(intervals) assert len(bounding_box.intervals) == 0 bounding_box._ignored = [] intervals = {0: _Interval(-1, 1)} with pytest.raises(ValueError, match=MESSAGE.format(1, 2)): bounding_box._validate_iterable(intervals) assert "x" not in bounding_box assert "y" not in bounding_box assert len(bounding_box.intervals) == 0 def test__validate(self): model = Gaussian2D() bounding_box = ModelBoundingBox({}, model) # Pass sequence Default order assert "x" not in bounding_box assert "y" not in bounding_box bounding_box._validate(((-4, 4), (-1, 1))) assert "x" in bounding_box assert bounding_box["x"] == (-1, 1) assert "y" in bounding_box assert bounding_box["y"] == (-4, 4) assert len(bounding_box.intervals) == 2 # Pass sequence bounding_box._intervals = {} assert "x" not in bounding_box assert "y" not in bounding_box bounding_box._validate(((-4, 4), (-1, 1)), order="F") assert "x" in bounding_box assert bounding_box["x"] == (-4, 4) assert "y" in bounding_box assert bounding_box["y"] == (-1, 1) assert len(bounding_box.intervals) == 2 # Pass Dict bounding_box._intervals = {} intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)} assert "x" not in bounding_box assert "y" not in bounding_box bounding_box._validate(intervals) assert 0 in bounding_box assert bounding_box[0] == (-1, 1) assert 1 in bounding_box assert bounding_box[1] == (-4, 4) assert len(bounding_box.intervals) == 2 # Pass single with ignored intervals = {0: _Interval(-1, 1)} bounding_box = ModelBoundingBox({}, model, ignored=[1]) assert 0 not in bounding_box.intervals assert 1 not in bounding_box.intervals bounding_box._validate(intervals) assert 0 in bounding_box.intervals assert bounding_box[0] == (-1, 1) assert 1 not in bounding_box.intervals assert len(bounding_box.intervals) == 1 # Pass single model = Gaussian1D() bounding_box = ModelBoundingBox({}, model) assert "x" not in bounding_box bounding_box._validate((-1, 1)) assert "x" in bounding_box assert bounding_box["x"] == (-1, 1) assert len(bounding_box.intervals) == 1 # Model set support model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2) bounding_box = ModelBoundingBox({}, model) sequence = (np.array([-1, -2]), np.array([1, 2])) assert "x" not in bounding_box bounding_box._validate(sequence) assert "x" in bounding_box assert (bounding_box["x"].lower == np.array([-1, -2])).all() assert (bounding_box["x"].upper == np.array([1, 2])).all() def test_validate(self): model = Gaussian2D() kwargs = {"test": mk.MagicMock()} # Pass sequence Default order bounding_box = ModelBoundingBox.validate(model, ((-4, 4), (-1, 1)), **kwargs) assert (bounding_box._model.parameters == model.parameters).all() assert "x" in bounding_box assert bounding_box["x"] == (-1, 1) assert "y" in bounding_box assert bounding_box["y"] == (-4, 4) assert len(bounding_box.intervals) == 2 # Pass sequence bounding_box = ModelBoundingBox.validate( model, ((-4, 4), (-1, 1)), order="F", **kwargs ) assert (bounding_box._model.parameters == model.parameters).all() assert "x" in bounding_box assert bounding_box["x"] == (-4, 4) assert "y" in bounding_box assert bounding_box["y"] == (-1, 1) assert len(bounding_box.intervals) == 2 # Pass Dict intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)} bounding_box = ModelBoundingBox.validate(model, intervals, order="F", **kwargs) assert (bounding_box._model.parameters 
== model.parameters).all() assert 0 in bounding_box assert bounding_box[0] == (-1, 1) assert 1 in bounding_box assert bounding_box[1] == (-4, 4) assert len(bounding_box.intervals) == 2 assert bounding_box.order == "F" # Pass ModelBoundingBox bbox = bounding_box bounding_box = ModelBoundingBox.validate(model, bbox, **kwargs) assert (bounding_box._model.parameters == model.parameters).all() assert 0 in bounding_box assert bounding_box[0] == (-1, 1) assert 1 in bounding_box assert bounding_box[1] == (-4, 4) assert len(bounding_box.intervals) == 2 assert bounding_box.order == "F" # Pass single ignored intervals = {0: _Interval(-1, 1)} bounding_box = ModelBoundingBox.validate( model, intervals, ignored=["y"], **kwargs ) assert (bounding_box._model.parameters == model.parameters).all() assert "x" in bounding_box assert bounding_box["x"] == (-1, 1) assert "y" in bounding_box assert bounding_box["y"] == _ignored_interval assert len(bounding_box.intervals) == 1 # Pass single bounding_box = ModelBoundingBox.validate(Gaussian1D(), (-1, 1), **kwargs) assert (bounding_box._model.parameters == Gaussian1D().parameters).all() assert "x" in bounding_box assert bounding_box["x"] == (-1, 1) assert len(bounding_box.intervals) == 1 # Model set support model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2) sequence = (np.array([-1, -2]), np.array([1, 2])) bounding_box = ModelBoundingBox.validate(model, sequence, **kwargs) assert "x" in bounding_box assert (bounding_box["x"].lower == np.array([-1, -2])).all() assert (bounding_box["x"].upper == np.array([1, 2])).all() def test_fix_inputs(self): bounding_box = ModelBoundingBox.validate(Gaussian2D(), ((-4, 4), (-1, 1))) # keep_ignored = False (default) new_bounding_box = bounding_box.fix_inputs(Gaussian1D(), {1: mk.MagicMock()}) assert not (bounding_box == new_bounding_box) assert (new_bounding_box._model.parameters == Gaussian1D().parameters).all() assert "x" in new_bounding_box assert new_bounding_box["x"] == (-1, 1) assert "y" not in new_bounding_box assert len(new_bounding_box.intervals) == 1 assert new_bounding_box.ignored == [] # keep_ignored = True new_bounding_box = bounding_box.fix_inputs( Gaussian2D(), {1: mk.MagicMock()}, _keep_ignored=True ) assert not (bounding_box == new_bounding_box) assert (new_bounding_box._model.parameters == Gaussian2D().parameters).all() assert "x" in new_bounding_box assert new_bounding_box["x"] == (-1, 1) assert "y" in new_bounding_box assert "y" in new_bounding_box.ignored_inputs assert len(new_bounding_box.intervals) == 1 assert new_bounding_box.ignored == [1] def test_dimension(self): intervals = {0: _Interval(-1, 1)} model = Gaussian1D() bounding_box = ModelBoundingBox.validate(model, intervals) assert bounding_box.dimension == 1 == len(bounding_box._intervals) intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)} model = Gaussian2D() bounding_box = ModelBoundingBox.validate(model, intervals) assert bounding_box.dimension == 2 == len(bounding_box._intervals) bounding_box._intervals = {} assert bounding_box.dimension == 0 == len(bounding_box._intervals) def test_domain(self): intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)} model = Gaussian2D() bounding_box = ModelBoundingBox.validate(model, intervals) # test defaults assert ( np.array(bounding_box.domain(0.25)) == np.array([np.linspace(0, 2, 9), np.linspace(-1, 1, 9)]) ).all() # test C order assert ( np.array(bounding_box.domain(0.25, "C")) == np.array([np.linspace(0, 2, 9), np.linspace(-1, 1, 9)]) ).all() # test Fortran order assert ( 
np.array(bounding_box.domain(0.25, "F")) == np.array([np.linspace(-1, 1, 9), np.linspace(0, 2, 9)]) ).all() # test error order MESSAGE = r"order must be either 'C' .* or 'F' .*, got: .*" with pytest.raises(ValueError, match=MESSAGE): bounding_box.domain(0.25, mk.MagicMock()) def test__outside(self): intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)} model = Gaussian2D() bounding_box = ModelBoundingBox.validate(model, intervals) # Normal array input, all inside x = np.linspace(-1, 1, 13) y = np.linspace(0, 2, 13) input_shape = x.shape inputs = (x, y) outside_index, all_out = bounding_box._outside(input_shape, inputs) assert (outside_index == [False for _ in range(13)]).all() assert not all_out and isinstance(all_out, bool) # Normal array input, some inside and some outside x = np.linspace(-2, 1, 13) y = np.linspace(0, 3, 13) input_shape = x.shape inputs = (x, y) outside_index, all_out = bounding_box._outside(input_shape, inputs) # fmt: off assert ( outside_index == [ True, True, True, True, False, False, False, False, False, True, True, True, True ] ).all() # fmt: on assert not all_out and isinstance(all_out, bool) # Normal array input, all outside x = np.linspace(2, 3, 13) y = np.linspace(-2, -1, 13) input_shape = x.shape inputs = (x, y) outside_index, all_out = bounding_box._outside(input_shape, inputs) assert (outside_index == [True for _ in range(13)]).all() assert all_out and isinstance(all_out, bool) # Scalar input inside bounding_box inputs = (0.5, 0.5) input_shape = (1,) outside_index, all_out = bounding_box._outside(input_shape, inputs) assert (outside_index == [False]).all() assert not all_out and isinstance(all_out, bool) # Scalar input outside bounding_box inputs = (2, -1) input_shape = (1,) outside_index, all_out = bounding_box._outside(input_shape, inputs) assert (outside_index == [True]).all() assert all_out and isinstance(all_out, bool) def test__valid_index(self): intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)} model = Gaussian2D() bounding_box = ModelBoundingBox.validate(model, intervals) # Normal array input, all inside x = np.linspace(-1, 1, 13) y = np.linspace(0, 2, 13) input_shape = x.shape inputs = (x, y) valid_index, all_out = bounding_box._valid_index(input_shape, inputs) assert len(valid_index) == 1 assert (valid_index[0] == [idx for idx in range(13)]).all() assert not all_out and isinstance(all_out, bool) # Normal array input, some inside and some outside x = np.linspace(-2, 1, 13) y = np.linspace(0, 3, 13) input_shape = x.shape inputs = (x, y) valid_index, all_out = bounding_box._valid_index(input_shape, inputs) assert len(valid_index) == 1 assert (valid_index[0] == [4, 5, 6, 7, 8]).all() assert not all_out and isinstance(all_out, bool) # Normal array input, all outside x = np.linspace(2, 3, 13) y = np.linspace(-2, -1, 13) input_shape = x.shape inputs = (x, y) valid_index, all_out = bounding_box._valid_index(input_shape, inputs) assert len(valid_index) == 1 assert (valid_index[0] == []).all() assert all_out and isinstance(all_out, bool) # Scalar input inside bounding_box inputs = (0.5, 0.5) input_shape = (1,) valid_index, all_out = bounding_box._valid_index(input_shape, inputs) assert len(valid_index) == 1 assert (valid_index[0] == [0]).all() assert not all_out and isinstance(all_out, bool) # Scalar input outside bounding_box inputs = (2, -1) input_shape = (1,) valid_index, all_out = bounding_box._valid_index(input_shape, inputs) assert len(valid_index) == 1 assert (valid_index[0] == []).all() assert all_out and isinstance(all_out, bool) def 
test_prepare_inputs(self): intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)} model = Gaussian2D() bounding_box = ModelBoundingBox.validate(model, intervals) # Normal array input, all inside x = np.linspace(-1, 1, 13) y = np.linspace(0, 2, 13) input_shape = x.shape inputs = (x, y) new_inputs, valid_index, all_out = bounding_box.prepare_inputs( input_shape, inputs ) assert (np.array(new_inputs) == np.array(inputs)).all() assert len(valid_index) == 1 assert (valid_index[0] == [idx for idx in range(13)]).all() assert not all_out and isinstance(all_out, bool) # Normal array input, some inside and some outside x = np.linspace(-2, 1, 13) y = np.linspace(0, 3, 13) input_shape = x.shape inputs = (x, y) new_inputs, valid_index, all_out = bounding_box.prepare_inputs( input_shape, inputs ) assert ( np.array(new_inputs) == np.array( [ [x[4], x[5], x[6], x[7], x[8]], [y[4], y[5], y[6], y[7], y[8]], ] ) ).all() assert len(valid_index) == 1 assert (valid_index[0] == [4, 5, 6, 7, 8]).all() assert not all_out and isinstance(all_out, bool) # Normal array input, all outside x = np.linspace(2, 3, 13) y = np.linspace(-2, -1, 13) input_shape = x.shape inputs = (x, y) new_inputs, valid_index, all_out = bounding_box.prepare_inputs( input_shape, inputs ) assert new_inputs == () assert len(valid_index) == 1 assert (valid_index[0] == []).all() assert all_out and isinstance(all_out, bool) # Scalar input inside bounding_box inputs = (0.5, 0.5) input_shape = (1,) new_inputs, valid_index, all_out = bounding_box.prepare_inputs( input_shape, inputs ) assert (np.array(new_inputs) == np.array([[0.5], [0.5]])).all() assert len(valid_index) == 1 assert (valid_index[0] == [0]).all() assert not all_out and isinstance(all_out, bool) # Scalar input outside bounding_box inputs = (2, -1) input_shape = (1,) new_inputs, valid_index, all_out = bounding_box.prepare_inputs( input_shape, inputs ) assert new_inputs == () assert len(valid_index) == 1 assert (valid_index[0] == []).all() assert all_out and isinstance(all_out, bool) def test_bounding_box_ignore(self): """Regression test for #13028""" bbox_x = ModelBoundingBox((9, 10), Polynomial2D(1), ignored=["x"]) assert bbox_x.ignored_inputs == ["x"] bbox_y = ModelBoundingBox((11, 12), Polynomial2D(1), ignored=["y"]) assert bbox_y.ignored_inputs == ["y"] class Test_SelectorArgument: def test_create(self): index = mk.MagicMock() ignore = mk.MagicMock() argument = _SelectorArgument(index, ignore) assert isinstance(argument, _BaseSelectorArgument) assert argument.index == index assert argument.ignore == ignore assert argument == (index, ignore) def test_validate(self): model = Gaussian2D() # default integer assert _SelectorArgument.validate(model, 0) == (0, True) assert _SelectorArgument.validate(model, 1) == (1, True) # default string assert _SelectorArgument.validate(model, "x") == (0, True) assert _SelectorArgument.validate(model, "y") == (1, True) ignore = mk.MagicMock() # non-default integer assert _SelectorArgument.validate(model, 0, ignore) == (0, ignore) assert _SelectorArgument.validate(model, 1, ignore) == (1, ignore) # non-default string assert _SelectorArgument.validate(model, "x", ignore) == (0, ignore) assert _SelectorArgument.validate(model, "y", ignore) == (1, ignore) # Fail with pytest.raises(ValueError, match=r"'.*' is not one of the inputs: .*"): _SelectorArgument.validate(model, "z") with pytest.raises( ValueError, match=r"Key value: .* must be string or integer." 
): _SelectorArgument.validate(model, mk.MagicMock()) with pytest.raises( IndexError, match=r"Integer key: .* must be non-negative and < .*" ): _SelectorArgument.validate(model, 2) def test_get_selector(self): # single inputs inputs = [idx + 17 for idx in range(3)] for index in range(3): assert ( _SelectorArgument(index, mk.MagicMock()).get_selector(*inputs) == inputs[index] ) # numpy array of single inputs inputs = [np.array([idx + 11]) for idx in range(3)] for index in range(3): assert ( _SelectorArgument(index, mk.MagicMock()).get_selector(*inputs) == inputs[index] ) inputs = [np.asanyarray(idx + 13) for idx in range(3)] for index in range(3): assert ( _SelectorArgument(index, mk.MagicMock()).get_selector(*inputs) == inputs[index] ) # multi entry numpy array inputs = [np.array([idx + 27, idx - 31]) for idx in range(3)] for index in range(3): assert _SelectorArgument(index, mk.MagicMock()).get_selector( *inputs ) == tuple(inputs[index]) def test_name(self): model = Gaussian2D() for index in range(model.n_inputs): assert ( _SelectorArgument(index, mk.MagicMock()).name(model) == model.inputs[index] ) def test_pretty_repr(self): model = Gaussian2D() assert ( _SelectorArgument(0, False).pretty_repr(model) == "Argument(name='x', ignore=False)" ) assert ( _SelectorArgument(0, True).pretty_repr(model) == "Argument(name='x', ignore=True)" ) assert ( _SelectorArgument(1, False).pretty_repr(model) == "Argument(name='y', ignore=False)" ) assert ( _SelectorArgument(1, True).pretty_repr(model) == "Argument(name='y', ignore=True)" ) def test_get_fixed_value(self): model = Gaussian2D() values = {0: 5, "y": 7} # Get index value assert _SelectorArgument(0, mk.MagicMock()).get_fixed_value(model, values) == 5 # Get name value assert _SelectorArgument(1, mk.MagicMock()).get_fixed_value(model, values) == 7 # Fail MESSAGE = r".* was not found in .*" with pytest.raises(RuntimeError, match=MESSAGE) as err: _SelectorArgument(1, True).get_fixed_value(model, {0: 5}) def test_is_argument(self): model = Gaussian2D() argument = _SelectorArgument.validate(model, 0) # Is true assert argument.is_argument(model, 0) is True assert argument.is_argument(model, "x") is True # Is false assert argument.is_argument(model, 1) is False assert argument.is_argument(model, "y") is False # Fail with pytest.raises(ValueError, match=r"'.*' is not one of the inputs: .*"): argument.is_argument(model, "z") with pytest.raises( ValueError, match=r"Key value: .* must be string or integer" ): argument.is_argument(model, mk.MagicMock()) with pytest.raises( IndexError, match=r"Integer key: .* must be non-negative and < .*" ): argument.is_argument(model, 2) def test_named_tuple(self): model = Gaussian2D() for index in range(model.n_inputs): ignore = mk.MagicMock() assert _SelectorArgument(index, ignore).named_tuple(model) == ( model.inputs[index], ignore, ) class Test_SelectorArguments: def test_create(self): arguments = _SelectorArguments( (_SelectorArgument(0, True), _SelectorArgument(1, False)) ) assert isinstance(arguments, _SelectorArguments) assert arguments == ((0, True), (1, False)) assert arguments._kept_ignore == [] kept_ignore = mk.MagicMock() arguments = _SelectorArguments( (_SelectorArgument(0, True), _SelectorArgument(1, False)), kept_ignore ) assert isinstance(arguments, _SelectorArguments) assert arguments == ((0, True), (1, False)) assert arguments._kept_ignore == kept_ignore def test_pretty_repr(self): model = Gaussian2D() arguments = _SelectorArguments( (_SelectorArgument(0, True), _SelectorArgument(1, False)) ) assert ( 
arguments.pretty_repr(model) == "SelectorArguments(\n" " Argument(name='x', ignore=True)\n" " Argument(name='y', ignore=False)\n" ")" ) def test_ignore(self): assert _SelectorArguments( (_SelectorArgument(0, True), _SelectorArgument(1, True)) ).ignore == [0, 1] assert _SelectorArguments( (_SelectorArgument(0, True), _SelectorArgument(1, True)), [13, 4] ).ignore == [0, 1, 13, 4] assert _SelectorArguments( (_SelectorArgument(0, True), _SelectorArgument(1, False)) ).ignore == [0] assert _SelectorArguments( (_SelectorArgument(0, False), _SelectorArgument(1, True)) ).ignore == [1] assert ( _SelectorArguments( (_SelectorArgument(0, False), _SelectorArgument(1, False)) ).ignore == [] ) assert _SelectorArguments( (_SelectorArgument(0, False), _SelectorArgument(1, False)), [17, 14] ).ignore == [17, 14] def test_validate(self): # Integer key and passed ignore arguments = _SelectorArguments.validate(Gaussian2D(), ((0, True), (1, False))) assert isinstance(arguments, _SelectorArguments) assert arguments == ((0, True), (1, False)) assert arguments.kept_ignore == [] # Default ignore arguments = _SelectorArguments.validate(Gaussian2D(), ((0,), (1,))) assert isinstance(arguments, _SelectorArguments) assert arguments == ((0, True), (1, True)) assert arguments.kept_ignore == [] # String key and passed ignore arguments = _SelectorArguments.validate( Gaussian2D(), (("x", True), ("y", False)) ) assert isinstance(arguments, _SelectorArguments) assert arguments == ((0, True), (1, False)) assert arguments.kept_ignore == [] # Test kept_ignore option new_arguments = _SelectorArguments.validate(Gaussian2D(), arguments, [11, 5, 8]) assert isinstance(new_arguments, _SelectorArguments) assert new_arguments == ((0, True), (1, False)) assert new_arguments.kept_ignore == [11, 5, 8] arguments._kept_ignore = [13, 17, 14] new_arguments = _SelectorArguments.validate(Gaussian2D(), arguments) assert isinstance(new_arguments, _SelectorArguments) assert new_arguments == ((0, True), (1, False)) assert new_arguments.kept_ignore == [13, 17, 14] # Invalid, bad argument with pytest.raises(ValueError, match=r"'.*' is not one of the inputs: .*"): _SelectorArguments.validate(Gaussian2D(), ((0, True), ("z", False))) with pytest.raises( ValueError, match=r"Key value: .* must be string or integer" ): _SelectorArguments.validate( Gaussian2D(), ((mk.MagicMock(), True), (1, False)) ) with pytest.raises( IndexError, match=r"Integer key: .* must be non-negative and < .*" ): _SelectorArguments.validate(Gaussian2D(), ((0, True), (2, False))) # Invalid, repeated argument with pytest.raises(ValueError, match=r"Input: 'x' has been repeated"): _SelectorArguments.validate(Gaussian2D(), ((0, True), (0, False))) # Invalid, no arguments with pytest.raises( ValueError, match=r"There must be at least one selector argument" ): _SelectorArguments.validate(Gaussian2D(), ()) def test_get_selector(self): inputs = [idx + 19 for idx in range(4)] assert _SelectorArguments.validate( Gaussian2D(), ((0, True), (1, False)) ).get_selector(*inputs) == tuple(inputs[:2]) assert _SelectorArguments.validate( Gaussian2D(), ((1, True), (0, False)) ).get_selector(*inputs) == tuple(inputs[:2][::-1]) assert _SelectorArguments.validate(Gaussian2D(), ((1, False),)).get_selector( *inputs ) == (inputs[1],) assert _SelectorArguments.validate(Gaussian2D(), ((0, True),)).get_selector( *inputs ) == (inputs[0],) def test_is_selector(self): # Is Selector assert _SelectorArguments.validate( Gaussian2D(), ((0, True), (1, False)) ).is_selector((0.5, 2.5)) assert 
_SelectorArguments.validate(Gaussian2D(), ((0, True),)).is_selector( (0.5,) ) # Is not selector assert not _SelectorArguments.validate( Gaussian2D(), ((0, True), (1, False)) ).is_selector((0.5, 2.5, 3.5)) assert not _SelectorArguments.validate( Gaussian2D(), ((0, True), (1, False)) ).is_selector((0.5,)) assert not _SelectorArguments.validate( Gaussian2D(), ((0, True), (1, False)) ).is_selector(0.5) assert not _SelectorArguments.validate(Gaussian2D(), ((0, True),)).is_selector( (0.5, 2.5) ) assert not _SelectorArguments.validate(Gaussian2D(), ((0, True),)).is_selector( 2.5 ) def test_get_fixed_values(self): model = Gaussian2D() assert _SelectorArguments.validate( model, ((0, True), (1, False)) ).get_fixed_values(model, {0: 11, 1: 7}) == (11, 7) assert _SelectorArguments.validate( model, ((0, True), (1, False)) ).get_fixed_values(model, {0: 5, "y": 47}) == (5, 47) assert _SelectorArguments.validate( model, ((0, True), (1, False)) ).get_fixed_values(model, {"x": 2, "y": 9}) == (2, 9) assert _SelectorArguments.validate( model, ((0, True), (1, False)) ).get_fixed_values(model, {"x": 12, 1: 19}) == (12, 19) def test_is_argument(self): model = Gaussian2D() # Is true arguments = _SelectorArguments.validate(model, ((0, True), (1, False))) assert arguments.is_argument(model, 0) is True assert arguments.is_argument(model, "x") is True assert arguments.is_argument(model, 1) is True assert arguments.is_argument(model, "y") is True # Is true and false arguments = _SelectorArguments.validate(model, ((0, True),)) assert arguments.is_argument(model, 0) is True assert arguments.is_argument(model, "x") is True assert arguments.is_argument(model, 1) is False assert arguments.is_argument(model, "y") is False arguments = _SelectorArguments.validate(model, ((1, False),)) assert arguments.is_argument(model, 0) is False assert arguments.is_argument(model, "x") is False assert arguments.is_argument(model, 1) is True assert arguments.is_argument(model, "y") is True def test_selector_index(self): model = Gaussian2D() arguments = _SelectorArguments.validate(model, ((0, True), (1, False))) assert arguments.selector_index(model, 0) == 0 assert arguments.selector_index(model, "x") == 0 assert arguments.selector_index(model, 1) == 1 assert arguments.selector_index(model, "y") == 1 arguments = _SelectorArguments.validate(model, ((1, True), (0, False))) assert arguments.selector_index(model, 0) == 1 assert arguments.selector_index(model, "x") == 1 assert arguments.selector_index(model, 1) == 0 assert arguments.selector_index(model, "y") == 0 # Error arguments = _SelectorArguments.validate(model, ((0, True),)) with pytest.raises( ValueError, match=r"y does not correspond to any selector argument" ): arguments.selector_index(model, "y") def test_add_ignore(self): model = Gaussian2D() arguments = _SelectorArguments.validate(model, ((0, True),)) assert arguments == ((0, True),) assert arguments._kept_ignore == [] new_arguments0 = arguments.add_ignore(model, 1) assert new_arguments0 == arguments assert new_arguments0._kept_ignore == [1] assert arguments._kept_ignore == [] assert arguments._kept_ignore == [] new_arguments1 = new_arguments0.add_ignore(model, "y") assert new_arguments1 == arguments == new_arguments0 assert new_arguments0._kept_ignore == [1] assert new_arguments1._kept_ignore == [1, 1] assert arguments._kept_ignore == [] # Error with pytest.raises( ValueError, match=r"0: is a selector argument and cannot be ignored" ): arguments.add_ignore(model, 0) def test_reduce(self): model = Gaussian2D() arguments = 
_SelectorArguments.validate(model, ((0, True), (1, False))) new_arguments = arguments.reduce(model, 0) assert isinstance(new_arguments, _SelectorArguments) assert new_arguments == ((1, False),) assert new_arguments._kept_ignore == [0] assert arguments._kept_ignore == [] new_arguments = arguments.reduce(model, "x") assert isinstance(new_arguments, _SelectorArguments) assert new_arguments == ((1, False),) assert new_arguments._kept_ignore == [0] assert arguments._kept_ignore == [] new_arguments = arguments.reduce(model, 1) assert isinstance(new_arguments, _SelectorArguments) assert new_arguments == ((0, True),) assert new_arguments._kept_ignore == [1] assert arguments._kept_ignore == [] new_arguments = arguments.reduce(model, "y") assert isinstance(new_arguments, _SelectorArguments) assert new_arguments == ((0, True),) assert new_arguments._kept_ignore == [1] assert arguments._kept_ignore == [] def test_named_tuple(self): model = Gaussian2D() arguments = _SelectorArguments.validate(model, ((0, True), (1, False))) assert arguments.named_tuple(model) == (("x", True), ("y", False)) class TestCompoundBoundingBox: def test_create(self): model = Gaussian2D() selector_args = ((0, True),) bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)} create_selector = mk.MagicMock() bounding_box = CompoundBoundingBox( bounding_boxes, model, selector_args, create_selector, order="F" ) assert (bounding_box._model.parameters == model.parameters).all() assert bounding_box._selector_args == selector_args for _selector, bbox in bounding_boxes.items(): assert _selector in bounding_box._bounding_boxes assert bounding_box._bounding_boxes[_selector] == bbox for _selector, bbox in bounding_box._bounding_boxes.items(): assert _selector in bounding_boxes assert bounding_boxes[_selector] == bbox assert isinstance(bbox, ModelBoundingBox) assert bounding_box._bounding_boxes == bounding_boxes assert bounding_box._create_selector == create_selector assert bounding_box._order == "F" def test_copy(self): bounding_box = CompoundBoundingBox.validate( Gaussian2D(), {(1,): (-1.5, 1.3), (2,): (-2.7, 2.4)}, ((0, True),), mk.MagicMock(), ) copy = bounding_box.copy() assert bounding_box == copy assert id(bounding_box) != id(copy) # model is not copied to prevent infinite recursion assert bounding_box._model == copy._model assert id(bounding_box._model) == id(copy._model) # Same string values will have the same id assert bounding_box._order == copy._order assert id(bounding_box._order) == id(copy._order) assert bounding_box._create_selector == copy._create_selector assert id(bounding_box._create_selector) != id(copy._create_selector) # Check selector_args for index, argument in enumerate(bounding_box.selector_args): assert argument == copy.selector_args[index] assert id(argument) != id(copy.selector_args[index]) # Same integer values will have the same id assert argument.index == copy.selector_args[index].index assert id(argument.index) == id(copy.selector_args[index].index) # Same boolean values will have the same id assert argument.ignore == copy.selector_args[index].ignore assert id(argument.ignore) == id(copy.selector_args[index].ignore) assert len(bounding_box.selector_args) == len(copy.selector_args) # Check bounding_boxes for selector, bbox in bounding_box.bounding_boxes.items(): assert bbox == copy.bounding_boxes[selector] assert id(bbox) != id(copy.bounding_boxes[selector]) assert bbox.ignored == copy.bounding_boxes[selector].ignored assert id(bbox.ignored) != id(copy.bounding_boxes[selector].ignored) # model is not copied to
prevent infinite recursion assert bbox._model == copy.bounding_boxes[selector]._model assert id(bbox._model) == id(copy.bounding_boxes[selector]._model) # Same string values will have the same id assert bbox._order == copy.bounding_boxes[selector]._order assert id(bbox._order) == id(copy.bounding_boxes[selector]._order) # Check interval objects for index, interval in bbox.intervals.items(): assert interval == copy.bounding_boxes[selector].intervals[index] assert id(interval) != id( copy.bounding_boxes[selector].intervals[index] ) # Same float values will have the same id assert ( interval.lower == copy.bounding_boxes[selector].intervals[index].lower ) assert id(interval.lower) == id( copy.bounding_boxes[selector].intervals[index].lower ) # Same float values will have the same id assert ( interval.upper == copy.bounding_boxes[selector].intervals[index].upper ) assert id(interval.upper) == id( copy.bounding_boxes[selector].intervals[index].upper ) assert len(bbox.intervals) == len(copy.bounding_boxes[selector].intervals) assert ( bbox.intervals.keys() == copy.bounding_boxes[selector].intervals.keys() ) assert len(bounding_box.bounding_boxes) == len(copy.bounding_boxes) assert bounding_box.bounding_boxes.keys() == copy.bounding_boxes.keys() def test___repr__(self): model = Gaussian2D() selector_args = ((0, True),) bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)} bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args) assert ( bounding_box.__repr__() == "CompoundBoundingBox(\n" " bounding_boxes={\n" " (1,) = ModelBoundingBox(\n" " intervals={\n" " y: Interval(lower=-1, upper=1)\n" " }\n" " ignored=['x']\n" " model=Gaussian2D(inputs=('x', 'y'))\n" " order='C'\n" " )\n" " (2,) = ModelBoundingBox(\n" " intervals={\n" " y: Interval(lower=-2, upper=2)\n" " }\n" " ignored=['x']\n" " model=Gaussian2D(inputs=('x', 'y'))\n" " order='C'\n" " )\n" " }\n" " selector_args = SelectorArguments(\n" " Argument(name='x', ignore=True)\n" " )\n" ")" ) def test_bounding_boxes(self): model = Gaussian2D() selector_args = ((0, True),) bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)} bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args) assert bounding_box._bounding_boxes == bounding_boxes assert bounding_box.bounding_boxes == bounding_boxes def test_selector_args(self): model = Gaussian2D() selector_args = ((0, True),) bounding_box = CompoundBoundingBox({}, model, selector_args) # Get assert bounding_box._selector_args == selector_args assert bounding_box.selector_args == selector_args # Set selector_args = ((1, False),) with pytest.warns(RuntimeWarning, match=r"Overriding selector_args.*"): bounding_box.selector_args = selector_args assert bounding_box._selector_args == selector_args assert bounding_box.selector_args == selector_args def test_create_selector(self): model = Gaussian2D() create_selector = mk.MagicMock() bounding_box = CompoundBoundingBox({}, model, ((1,),), create_selector) assert bounding_box._create_selector == create_selector assert bounding_box.create_selector == create_selector def test__get_selector_key(self): bounding_box = CompoundBoundingBox({}, Gaussian2D(), ((1, True),)) assert len(bounding_box.bounding_boxes) == 0 # Singular assert bounding_box._get_selector_key(5) == (5,) assert bounding_box._get_selector_key((5,)) == (5,) assert bounding_box._get_selector_key([5]) == (5,) assert bounding_box._get_selector_key(np.asanyarray(5)) == (5,) assert bounding_box._get_selector_key(np.array([5])) == (5,) # Multiple assert bounding_box._get_selector_key((5, 19))
== (5, 19) assert bounding_box._get_selector_key([5, 19]) == (5, 19) assert bounding_box._get_selector_key(np.array([5, 19])) == (5, 19) def test___setitem__(self): model = Gaussian2D() # Ignored argument bounding_box = CompoundBoundingBox({}, model, ((1, True),), order="F") assert len(bounding_box.bounding_boxes) == 0 # Valid bounding_box[(15,)] = (-15, 15) assert len(bounding_box.bounding_boxes) == 1 assert (15,) in bounding_box._bounding_boxes assert isinstance(bounding_box._bounding_boxes[(15,)], ModelBoundingBox) assert bounding_box._bounding_boxes[(15,)] == (-15, 15) assert bounding_box._bounding_boxes[(15,)].order == "F" # Invalid key assert (7, 13) not in bounding_box._bounding_boxes with pytest.raises(ValueError, match=".* is not a selector!"): bounding_box[(7, 13)] = (-7, 7) assert (7, 13) not in bounding_box._bounding_boxes assert len(bounding_box.bounding_boxes) == 1 # Invalid bounding box assert 13 not in bounding_box._bounding_boxes with pytest.raises( ValueError, match="An interval must be some sort of sequence of length 2" ): bounding_box[(13,)] = ((-13, 13), (-3, 3)) assert 13 not in bounding_box._bounding_boxes assert len(bounding_box.bounding_boxes) == 1 # No ignored argument bounding_box = CompoundBoundingBox({}, model, ((1, False),), order="F") assert len(bounding_box.bounding_boxes) == 0 # Valid bounding_box[(15,)] = ((-15, 15), (-6, 6)) assert len(bounding_box.bounding_boxes) == 1 assert (15,) in bounding_box._bounding_boxes assert isinstance(bounding_box._bounding_boxes[(15,)], ModelBoundingBox) assert bounding_box._bounding_boxes[(15,)] == ((-15, 15), (-6, 6)) assert bounding_box._bounding_boxes[(15,)].order == "F" # Invalid key assert (14, 11) not in bounding_box._bounding_boxes with pytest.raises(ValueError, match=".* is not a selector!"): bounding_box[(14, 11)] = ((-7, 7), (-12, 12)) assert (14, 11) not in bounding_box._bounding_boxes assert len(bounding_box.bounding_boxes) == 1 # Invalid bounding box assert 13 not in bounding_box._bounding_boxes with pytest.raises( ValueError, match="An interval must be some sort of sequence of length 2" ): bounding_box[(13,)] = (-13, 13) assert 13 not in bounding_box._bounding_boxes assert len(bounding_box.bounding_boxes) == 1 def test__validate(self): model = Gaussian2D() selector_args = ((0, True),) # Tuple selector_args bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)} bounding_box = CompoundBoundingBox({}, model, selector_args) bounding_box._validate(bounding_boxes) for _selector, bbox in bounding_boxes.items(): assert _selector in bounding_box._bounding_boxes assert bounding_box._bounding_boxes[_selector] == bbox for _selector, bbox in bounding_box._bounding_boxes.items(): assert _selector in bounding_boxes assert bounding_boxes[_selector] == bbox assert isinstance(bbox, ModelBoundingBox) assert bounding_box._bounding_boxes == bounding_boxes def test___eq__(self): bounding_box_1 = CompoundBoundingBox( {(1,): (-1, 1), (2,): (-2, 2)}, Gaussian2D(), ((0, True),) ) bounding_box_2 = CompoundBoundingBox( {(1,): (-1, 1), (2,): (-2, 2)}, Gaussian2D(), ((0, True),) ) # Equal assert bounding_box_1 == bounding_box_2 # Not equal to non-compound bounding_box assert not bounding_box_1 == mk.MagicMock() assert not bounding_box_2 == mk.MagicMock() # Not equal bounding_boxes bounding_box_2[(15,)] = (-15, 15) assert not bounding_box_1 == bounding_box_2 del bounding_box_2._bounding_boxes[(15,)] assert bounding_box_1 == bounding_box_2 # Not equal selector_args bounding_box_2._selector_args = _SelectorArguments.validate( Gaussian2D(), ((0, 
False),) ) assert not bounding_box_1 == bounding_box_2 bounding_box_2._selector_args = _SelectorArguments.validate( Gaussian2D(), ((0, True),) ) assert bounding_box_1 == bounding_box_2 # Not equal create_selector bounding_box_2._create_selector = mk.MagicMock() assert not bounding_box_1 == bounding_box_2 def test_validate(self): model = Gaussian2D() selector_args = ((0, True),) bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)} create_selector = mk.MagicMock() # Fail selector_args MESSAGE = r"Selector arguments must be provided .*" with pytest.raises(ValueError, match=MESSAGE): CompoundBoundingBox.validate(model, bounding_boxes) # Normal validate bounding_box = CompoundBoundingBox.validate( model, bounding_boxes, selector_args, create_selector, order="F" ) assert (bounding_box._model.parameters == model.parameters).all() assert bounding_box._selector_args == selector_args assert bounding_box._bounding_boxes == bounding_boxes assert bounding_box._create_selector == create_selector assert bounding_box._order == "F" # Re-validate new_bounding_box = CompoundBoundingBox.validate(model, bounding_box) assert bounding_box == new_bounding_box assert new_bounding_box._order == "F" # Default order bounding_box = CompoundBoundingBox.validate( model, bounding_boxes, selector_args, create_selector ) assert (bounding_box._model.parameters == model.parameters).all() assert bounding_box._selector_args == selector_args assert bounding_box._bounding_boxes == bounding_boxes assert bounding_box._create_selector == create_selector assert bounding_box._order == "C" def test___contains__(self): model = Gaussian2D() selector_args = ((0, True),) bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)} bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args) assert (1,) in bounding_box assert (2,) in bounding_box assert (3,) not in bounding_box assert 1 not in bounding_box assert 2 not in bounding_box def test__create_bounding_box(self): model = Gaussian2D() create_selector = mk.MagicMock() bounding_box = CompoundBoundingBox({}, model, ((1, False),), create_selector) # Create is successful create_selector.return_value = ((-15, 15), (-23, 23)) assert len(bounding_box._bounding_boxes) == 0 bbox = bounding_box._create_bounding_box((7,)) assert isinstance(bbox, ModelBoundingBox) assert bbox == ((-15, 15), (-23, 23)) assert len(bounding_box._bounding_boxes) == 1 assert (7,) in bounding_box assert isinstance(bounding_box[(7,)], ModelBoundingBox) assert bounding_box[(7,)] == bbox # Create is unsuccessful create_selector.return_value = (-42, 42) with pytest.raises( ValueError, match="An interval must be some sort of sequence of length 2" ): bounding_box._create_bounding_box((27,)) def test___getitem__(self): model = Gaussian2D() selector_args = ((0, True),) bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)} bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args) # already exists assert isinstance(bounding_box[1], ModelBoundingBox) assert bounding_box[1] == (-1, 1) assert isinstance(bounding_box[(2,)], ModelBoundingBox) assert bounding_box[2] == (-2, 2) assert isinstance(bounding_box[(1,)], ModelBoundingBox) assert bounding_box[(1,)] == (-1, 1) assert isinstance(bounding_box[(2,)], ModelBoundingBox) assert bounding_box[(2,)] == (-2, 2) # no selector with pytest.raises( RuntimeError, match="No bounding box is defined for selector: .*" ): bounding_box[(3,)] # Create a selector bounding_box._create_selector = mk.MagicMock() with mk.patch.object( CompoundBoundingBox, "_create_bounding_box", autospec=True ) as 
mkCreate: assert bounding_box[(3,)] == mkCreate.return_value assert mkCreate.call_args_list == [mk.call(bounding_box, (3,))] def test__select_bounding_box(self): model = Gaussian2D() selector_args = ((0, True),) bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)} bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args) inputs = [mk.MagicMock() for _ in range(3)] with mk.patch.object( _SelectorArguments, "get_selector", autospec=True ) as mkSelector: with mk.patch.object( CompoundBoundingBox, "__getitem__", autospec=True ) as mkGet: assert bounding_box._select_bounding_box(inputs) == mkGet.return_value assert mkGet.call_args_list == [ mk.call(bounding_box, mkSelector.return_value) ] assert mkSelector.call_args_list == [ mk.call(bounding_box.selector_args, *inputs) ] def test_prepare_inputs(self): model = Gaussian2D() selector_args = ((0, True),) bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)} bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args) input_shape = mk.MagicMock() with mk.patch.object( ModelBoundingBox, "prepare_inputs", autospec=True ) as mkPrepare: assert ( bounding_box.prepare_inputs(input_shape, [1, 2, 3]) == mkPrepare.return_value ) assert mkPrepare.call_args_list == [ mk.call(bounding_box[(1,)], input_shape, [1, 2, 3]) ] mkPrepare.reset_mock() assert ( bounding_box.prepare_inputs(input_shape, [2, 2, 3]) == mkPrepare.return_value ) assert mkPrepare.call_args_list == [ mk.call(bounding_box[(2,)], input_shape, [2, 2, 3]) ] mkPrepare.reset_mock() def test__matching_bounding_boxes(self): # Single selector index selector_args = ((0, False),) bounding_boxes = { (1,): ((-1, 1), (-2, 2)), (2,): ((-2, 2), (-3, 3)), (3,): ((-3, 3), (-4, 4)), } bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args) for value in [1, 2, 3]: matching = bounding_box._matching_bounding_boxes("x", value) assert isinstance(matching, dict) assert () in matching bbox = matching[()] assert isinstance(bbox, ModelBoundingBox) assert (bbox._model.parameters == Gaussian2D().parameters).all() assert "x" in bbox assert "x" in bbox.ignored_inputs assert "y" in bbox assert bbox["y"] == (-value, value) assert len(bbox.intervals) == 1 assert bbox.ignored == [0] # Multiple selector index selector_args = ((0, False), (1, False)) bounding_boxes = { (1, 3): ((-1, 1), (-2, 2)), (2, 2): ((-2, 2), (-3, 3)), (3, 1): ((-3, 3), (-4, 4)), } bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args) for value in [1, 2, 3]: matching = bounding_box._matching_bounding_boxes("x", value) assert isinstance(matching, dict) assert (4 - value,) in matching bbox = matching[(4 - value,)] assert isinstance(bbox, ModelBoundingBox) assert (bbox._model.parameters == Gaussian2D().parameters).all() assert "x" in bbox assert "x" in bbox.ignored_inputs assert "y" in bbox assert bbox["y"] == (-value, value) assert len(bbox.intervals) == 1 assert bbox.ignored == [0] matching = bounding_box._matching_bounding_boxes("y", value) assert isinstance(matching, dict) assert (4 - value,) in matching bbox = matching[(4 - value,)] assert isinstance(bbox, ModelBoundingBox) assert (bbox._model.parameters == Gaussian2D().parameters).all() assert "y" in bbox assert "y" in bbox.ignored_inputs assert "x" in bbox assert bbox["x"] == (-(5 - value), (5 - value)) assert len(bbox.intervals) == 1 assert bbox.ignored == [1] # Real fix input of slicing input model = Shift(1) & Scale(2) & Identity(1) model.inputs = ("x", "y", "slit_id") bounding_boxes = { (0,): ((-0.5, 1047.5), (-0.5, 2047.5)), (1,): 
((-0.5, 3047.5), (-0.5, 4047.5)), } bounding_box = CompoundBoundingBox.validate( model, bounding_boxes, selector_args=[("slit_id", True)], order="F" ) matching = bounding_box._matching_bounding_boxes("slit_id", 0) assert isinstance(matching, dict) assert () in matching bbox = matching[()] assert isinstance(bbox, ModelBoundingBox) assert (bbox._model.parameters == model.parameters).all() assert bbox.ignored_inputs == ["slit_id"] assert bbox.named_intervals == {"x": (-0.5, 1047.5), "y": (-0.5, 2047.5)} assert bbox.order == "F" matching = bounding_box._matching_bounding_boxes("slit_id", 1) assert isinstance(matching, dict) assert () in matching bbox = matching[()] assert isinstance(bbox, ModelBoundingBox) assert (bbox._model.parameters == model.parameters).all() assert bbox.ignored_inputs == ["slit_id"] assert bbox.named_intervals == {"x": (-0.5, 3047.5), "y": (-0.5, 4047.5)} assert bbox.order == "F" # Errors MESSAGE = ( r"Attempting to fix input .*, but there are no bounding boxes for argument" r" value .*" ) with pytest.raises(ValueError, match=MESSAGE): bounding_box._matching_bounding_boxes("slit_id", 2) def test__fix_input_selector_arg(self): # Single selector index selector_args = ((0, False),) bounding_boxes = { (1,): ((-1, 1), (-2, 2)), (2,): ((-2, 2), (-3, 3)), (3,): ((-3, 3), (-4, 4)), } bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args) for value in [1, 2, 3]: bbox = bounding_box._fix_input_selector_arg("x", value) assert isinstance(bbox, ModelBoundingBox) assert (bbox._model.parameters == Gaussian2D().parameters).all() assert "x" in bbox assert "x" in bbox.ignored_inputs assert "y" in bbox assert bbox["y"] == (-value, value) assert len(bbox.intervals) == 1 assert bbox.ignored == [0] # Multiple selector index selector_args = ((0, False), (1, False)) bounding_boxes = { (1, 3): ((-1, 1), (-2, 2)), (2, 2): ((-2, 2), (-3, 3)), (3, 1): ((-3, 3), (-4, 4)), } bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args) for value in [1, 2, 3]: bbox = bounding_box._fix_input_selector_arg("x", value) assert isinstance(bbox, CompoundBoundingBox) assert (bbox._model.parameters == Gaussian2D().parameters).all() assert bbox.selector_args == ((1, False),) assert (4 - value,) in bbox bbox_selector = bbox[(4 - value,)] assert isinstance(bbox_selector, ModelBoundingBox) assert (bbox_selector._model.parameters == Gaussian2D().parameters).all() assert "x" in bbox_selector assert "x" in bbox_selector.ignored_inputs assert "y" in bbox_selector assert bbox_selector["y"] == (-value, value) assert len(bbox_selector.intervals) == 1 assert bbox_selector.ignored == [0] bbox = bounding_box._fix_input_selector_arg("y", value) assert isinstance(bbox, CompoundBoundingBox) assert (bbox._model.parameters == Gaussian2D().parameters).all() assert bbox.selector_args == ((0, False),) assert (4 - value,) in bbox bbox_selector = bbox[(4 - value,)] assert isinstance(bbox_selector, ModelBoundingBox) assert (bbox_selector._model.parameters == Gaussian2D().parameters).all() assert "y" in bbox_selector assert "y" in bbox_selector.ignored_inputs assert "x" in bbox_selector assert bbox_selector["x"] == (-(5 - value), (5 - value)) assert len(bbox_selector.intervals) == 1 assert bbox_selector.ignored == [1] # Real fix input of slicing input model = Shift(1) & Scale(2) & Identity(1) model.inputs = ("x", "y", "slit_id") bounding_boxes = { (0,): ((-0.5, 1047.5), (-0.5, 2047.5)), (1,): ((-0.5, 3047.5), (-0.5, 4047.5)), } bounding_box = CompoundBoundingBox.validate( model, bounding_boxes, 
selector_args=[("slit_id", True)], order="F" ) bbox = bounding_box._fix_input_selector_arg("slit_id", 0) assert isinstance(bbox, ModelBoundingBox) assert (bbox._model.parameters == model.parameters).all() assert bbox.ignored_inputs == ["slit_id"] assert bbox.named_intervals == {"x": (-0.5, 1047.5), "y": (-0.5, 2047.5)} assert bbox.order == "F" bbox = bounding_box._fix_input_selector_arg("slit_id", 1) assert isinstance(bbox, ModelBoundingBox) assert (bbox._model.parameters == model.parameters).all() assert bbox.ignored_inputs == ["slit_id"] assert bbox.named_intervals == {"x": (-0.5, 3047.5), "y": (-0.5, 4047.5)} assert bbox.order == "F" def test__fix_input_bbox_arg(self): model = Shift(1) & Scale(2) & Identity(1) model.inputs = ("x", "y", "slit_id") bounding_boxes = { (0,): ((-0.5, 1047.5), (-0.5, 2047.5)), (1,): ((-0.5, 3047.5), (-0.5, 4047.5)), } bounding_box = CompoundBoundingBox.validate( model, bounding_boxes, selector_args=[("slit_id", True)], order="F" ) bbox = bounding_box._fix_input_bbox_arg("x", 5) assert isinstance(bbox, CompoundBoundingBox) assert (bbox._model.parameters == model.parameters).all() assert bbox.selector_args == ((2, True),) assert bbox.selector_args._kept_ignore == [0] assert bbox._bounding_boxes[(0,)] == (-0.5, 2047.5) assert bbox._bounding_boxes[(1,)] == (-0.5, 4047.5) assert len(bbox._bounding_boxes) == 2 bbox = bounding_box._fix_input_bbox_arg("y", 5) assert isinstance(bbox, CompoundBoundingBox) assert (bbox._model.parameters == model.parameters).all() assert bbox.selector_args == ((2, True),) assert bbox.selector_args._kept_ignore == [1] assert bbox._bounding_boxes[(0,)] == (-0.5, 1047.5) assert bbox._bounding_boxes[(1,)] == (-0.5, 3047.5) assert len(bbox._bounding_boxes) == 2 def test_fix_inputs(self): model = Shift(1) & Scale(2) & Identity(1) model.inputs = ("x", "y", "slit_id") bounding_boxes = { (0,): ((-0.5, 1047.5), (-0.5, 2047.5)), (1,): ((-0.5, 3047.5), (-0.5, 4047.5)), } bounding_box = CompoundBoundingBox.validate( model, bounding_boxes, selector_args=[("slit_id", True)], order="F" ) model.bounding_box = bounding_box # Fix selector argument new_model = fix_inputs(model, {"slit_id": 0}) bbox = new_model.bounding_box assert isinstance(bbox, ModelBoundingBox) assert (bbox._model.parameters == new_model.parameters).all() assert bbox.ignored_inputs == [] assert bbox.named_intervals == {"x": (-0.5, 1047.5), "y": (-0.5, 2047.5)} assert bbox.order == "F" # Fix a bounding_box field new_model = fix_inputs(model, {"x": 5}) bbox = new_model.bounding_box assert isinstance(bbox, CompoundBoundingBox) assert (bbox._model.parameters == model.parameters).all() assert bbox.selector_args == ((1, True),) assert bbox.selector_args._kept_ignore == [] assert bbox._bounding_boxes[(0,)] == (-0.5, 2047.5) assert bbox._bounding_boxes[(0,)].order == "F" assert bbox._bounding_boxes[(1,)] == (-0.5, 4047.5) assert bbox._bounding_boxes[(1,)].order == "F" assert len(bbox._bounding_boxes) == 2 new_model = fix_inputs(model, {"y": 5}) bbox = new_model.bounding_box assert isinstance(bbox, CompoundBoundingBox) assert (bbox._model.parameters == model.parameters).all() assert bbox.selector_args == ((1, True),) assert bbox.selector_args._kept_ignore == [] assert bbox._bounding_boxes[(0,)] == (-0.5, 1047.5) assert bbox._bounding_boxes[(0,)].order == "F" assert bbox._bounding_boxes[(1,)] == (-0.5, 3047.5) assert bbox._bounding_boxes[(1,)].order == "F" assert len(bbox._bounding_boxes) == 2 # Fix selector argument and a bounding_box field new_model = fix_inputs(model, {"slit_id": 0, "x": 5}) 
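# Fixing the selector and one interval input at once collapses the compound box to a plain 1D box over the remaining input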
bbox = new_model.bounding_box assert isinstance(bbox, ModelBoundingBox) assert (bbox._model.parameters == new_model.parameters).all() assert bbox.ignored_inputs == [] assert bbox.named_intervals == {"y": (-0.5, 2047.5)} assert bbox.order == "F" new_model = fix_inputs(model, {"y": 5, "slit_id": 1}) bbox = new_model.bounding_box assert isinstance(bbox, ModelBoundingBox) assert (bbox._model.parameters == new_model.parameters).all() assert bbox.ignored_inputs == [] assert bbox.named_intervals == {"x": (-0.5, 3047.5)} assert bbox.order == "F" # Fix two bounding_box fields new_model = fix_inputs(model, {"x": 5, "y": 7}) bbox = new_model.bounding_box assert isinstance(bbox, CompoundBoundingBox) assert bbox.selector_args == ((0, True),) assert bbox.selector_args._kept_ignore == [] assert bbox._bounding_boxes[(0,)] == (-np.inf, np.inf) assert bbox._bounding_boxes[(0,)].order == "F" assert bbox._bounding_boxes[(1,)] == (-np.inf, np.inf) assert bbox._bounding_boxes[(1,)].order == "F" assert len(bbox._bounding_boxes) == 2 def test_complex_compound_bounding_box(self): model = Identity(4) bounding_boxes = { (2.5, 1.3): ((-1, 1), (-3, 3)), (2.5, 2.71): ((-3, 3), (-1, 1)), } selector_args = (("x0", True), ("x1", True)) bbox = CompoundBoundingBox.validate(model, bounding_boxes, selector_args) assert bbox[(2.5, 1.3)] == ModelBoundingBox( ((-1, 1), (-3, 3)), model, ignored=["x0", "x1"] ) assert bbox[(2.5, 2.71)] == ModelBoundingBox( ((-3, 3), (-1, 1)), model, ignored=["x0", "x1"] )
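# A compact end-to-end sketch of the selector machinery the tests above
# exercise piecemeal. It is a minimal usage example rather than new coverage:
# it assumes only names already used in this module (Shift, Scale, Identity,
# fix_inputs, CompoundBoundingBox, ModelBoundingBox) and restates facts
# asserted individually in TestCompoundBoundingBox.
def test_compound_bounding_box_usage_sketch():
    model = Shift(1) & Scale(2) & Identity(1)
    model.inputs = ("x", "y", "slit_id")
    model.bounding_box = CompoundBoundingBox.validate(
        model,
        {
            (0,): ((-0.5, 1047.5), (-0.5, 2047.5)),
            (1,): ((-0.5, 3047.5), (-0.5, 4047.5)),
        },
        selector_args=[("slit_id", True)],
        order="F",
    )
    # Indexing by selector value hands back the per-slit ModelBoundingBox,
    # with the selector input itself ignored
    bbox = model.bounding_box[(0,)]
    assert isinstance(bbox, ModelBoundingBox)
    assert bbox.named_intervals == {"x": (-0.5, 1047.5), "y": (-0.5, 2047.5)}
    # Fixing the selector input collapses the compound box to the
    # corresponding plain box over the remaining inputs
    fixed = fix_inputs(model, {"slit_id": 1})
    assert isinstance(fixed.bounding_box, ModelBoundingBox)
    assert fixed.bounding_box.named_intervals == {
        "x": (-0.5, 3047.5),
        "y": (-0.5, 4047.5),
    }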
d7efeca6330e5448b4a4a49173e25af05af7fb744cd2108888ce5a28deea2e90
# Licensed under a 3-clause BSD style license - see LICENSE.rst # pylint: disable=invalid-name, pointless-statement import pickle import numpy as np import pytest from numpy.testing import assert_allclose, assert_array_equal import astropy.units as u from astropy.modeling.core import CompoundModel, Model, ModelDefinitionError from astropy.modeling.fitting import LevMarLSQFitter from astropy.modeling.models import ( Chebyshev1D, Chebyshev2D, Const1D, Gaussian1D, Gaussian2D, Identity, Legendre1D, Legendre2D, Linear1D, Mapping, Polynomial1D, Polynomial2D, Rotation2D, Scale, Shift, Tabular1D, fix_inputs, ) from astropy.modeling.parameters import Parameter from astropy.utils.compat.optional_deps import HAS_SCIPY @pytest.mark.parametrize( ("expr", "result"), [ (lambda x, y: x + y, [5.0, 5.0]), (lambda x, y: x - y, [-1.0, -1.0]), (lambda x, y: x * y, [6.0, 6.0]), (lambda x, y: x / y, [2.0 / 3.0, 2.0 / 3.0]), (lambda x, y: x**y, [8.0, 8.0]), ], ) def test_model_set(expr, result): s = expr(Const1D((2, 2), n_models=2), Const1D((3, 3), n_models=2)) out = s(0, model_set_axis=False) assert_array_equal(out, result) @pytest.mark.parametrize( ("expr", "result"), [ (lambda x, y: x + y, [5.0, 5.0]), (lambda x, y: x - y, [-1.0, -1.0]), (lambda x, y: x * y, [6.0, 6.0]), (lambda x, y: x / y, [2.0 / 3.0, 2.0 / 3.0]), (lambda x, y: x**y, [8.0, 8.0]), ], ) def test_model_set_raises_value_error(expr, result): """Check that creating model sets with components whose _n_models are different raise a value error """ MESSAGE = r"Both operands must have equal values for .*" with pytest.raises(ValueError, match=MESSAGE): expr(Const1D((2, 2), n_models=2), Const1D(3, n_models=1)) @pytest.mark.parametrize( ("expr", "result"), [ (lambda x, y: x + y, 5.0), (lambda x, y: x - y, -1.0), (lambda x, y: x * y, 6.0), (lambda x, y: x / y, 2.0 / 3.0), (lambda x, y: x**y, 8.0), ], ) def test_two_model_instance_arithmetic_1d(expr, result): """ Like test_two_model_class_arithmetic_1d, but creates a new model from two model *instances* with fixed parameters. """ s = expr(Const1D(2), Const1D(3)) assert isinstance(s, CompoundModel) assert s.n_inputs == 1 assert s.n_outputs == 1 out = s(0) assert out == result assert isinstance(out, float) def test_simple_two_model_compose_1d(): """ Shift and Scale are two of the simplest models to test model composition with. """ S1 = Shift(2) | Scale(3) # First shift then scale assert isinstance(S1, CompoundModel) assert S1.n_inputs == 1 assert S1.n_outputs == 1 assert S1(1) == 9.0 S2 = Scale(2) | Shift(3) # First scale then shift assert isinstance(S2, CompoundModel) assert S2.n_inputs == 1 assert S2.n_outputs == 1 assert S2(1) == 5.0 # Test with array inputs assert_array_equal(S2([1, 2, 3]), [5.0, 7.0, 9.0]) def test_simple_two_model_compose_2d(): """ A simple example consisting of two rotations. """ r1 = Rotation2D(45) | Rotation2D(45) assert isinstance(r1, CompoundModel) assert r1.n_inputs == 2 assert r1.n_outputs == 2 assert_allclose(r1(0, 1), (-1, 0), atol=1e-10) r2 = Rotation2D(90) | Rotation2D(90) # Rotate twice by 90 degrees assert_allclose(r2(0, 1), (0, -1), atol=1e-10) # Compose R with itself to produce 4 rotations r3 = r1 | r1 assert_allclose(r3(0, 1), (0, -1), atol=1e-10) def test_n_submodels(): """ Test that CompoundModel.n_submodels properly returns the number of components. 
""" g2 = Gaussian1D() + Gaussian1D() assert g2.n_submodels == 2 g3 = g2 + Gaussian1D() assert g3.n_submodels == 3 g5 = g3 | g2 assert g5.n_submodels == 5 g7 = g5 / g2 assert g7.n_submodels == 7 def test_expression_formatting(): """ Test that the expression strings from compound models are formatted correctly. """ # For the purposes of this test it doesn't matter a great deal what # model(s) are used in the expression, I don't think G = Gaussian1D(1, 1, 1) G2 = Gaussian2D(1, 2, 3, 4, 5, 6) M = G + G assert M._format_expression() == "[0] + [1]" M = G + G + G assert M._format_expression() == "[0] + [1] + [2]" M = G + G * G assert M._format_expression() == "[0] + [1] * [2]" M = G * G + G assert M._format_expression() == "[0] * [1] + [2]" M = G + G * G + G assert M._format_expression() == "[0] + [1] * [2] + [3]" M = (G + G) * (G + G) assert M._format_expression() == "([0] + [1]) * ([2] + [3])" # This example uses parentheses in the expression, but those won't be # preserved in the expression formatting since they technically aren't # necessary, and there's no way to know that they were originally # parenthesized (short of some deep, and probably not worthwhile # introspection) M = (G * G) + (G * G) assert M._format_expression() == "[0] * [1] + [2] * [3]" M = G**G assert M._format_expression() == "[0] ** [1]" M = G + G**G assert M._format_expression() == "[0] + [1] ** [2]" M = (G + G) ** G assert M._format_expression() == "([0] + [1]) ** [2]" M = G + G | G assert M._format_expression() == "[0] + [1] | [2]" M = G + (G | G) assert M._format_expression() == "[0] + ([1] | [2])" M = G & G | G2 assert M._format_expression() == "[0] & [1] | [2]" M = G & (G | G) assert M._format_expression() == "[0] & ([1] | [2])" def test_basic_compound_inverse(): """ Test basic inversion of compound models in the limited sense supported for models made from compositions and joins only. """ t = (Shift(2) & Shift(3)) | (Scale(2) & Scale(3)) | Rotation2D(90) assert_allclose(t.inverse(*t(0, 1)), (0, 1)) @pytest.mark.parametrize( "model", [ Shift(0) + Shift(0) | Shift(0), Shift(0) - Shift(0) | Shift(0), Shift(0) * Shift(0) | Shift(0), Shift(0) / Shift(0) | Shift(0), Shift(0) ** Shift(0) | Shift(0), Gaussian1D(1, 2, 3) | Gaussian1D(4, 5, 6), ], ) def test_compound_unsupported_inverse(model): """ Ensure inverses aren't supported in cases where it shouldn't be. """ MESSAGE = r"No analytical or user-supplied inverse transform .*" with pytest.raises(NotImplementedError, match=MESSAGE): model.inverse def test_mapping_basic_permutations(): """ Tests a couple basic examples of the Mapping model--specifically examples that merely permute the outputs. """ x, y = Rotation2D(90)(1, 2) rs = Rotation2D(90) | Mapping((1, 0)) x_prime, y_prime = rs(1, 2) assert_allclose((x, y), (y_prime, x_prime)) # A more complicated permutation m = Rotation2D(90) & Scale(2) x, y, z = m(1, 2, 3) ms = m | Mapping((2, 0, 1)) x_prime, y_prime, z_prime = ms(1, 2, 3) assert_allclose((x, y, z), (y_prime, z_prime, x_prime)) def test_mapping_inverse(): """Tests inverting a compound model that includes a `Mapping`.""" rs1 = Rotation2D(12.1) & Scale(13.2) rs2 = Rotation2D(14.3) & Scale(15.4) # Rotates 2 of the coordinates and scales the third--then rotates on a # different axis and scales on the axis of rotation. 
No physical meaning # here just a simple test m = rs1 | Mapping([2, 0, 1]) | rs2 assert_allclose((0, 1, 2), m.inverse(*m(0, 1, 2)), atol=1e-08) def test_identity_input(): """ Test a case where an Identity (or Mapping) model is the first in a chain of composite models and thus is responsible for handling input broadcasting properly. Regression test for https://github.com/astropy/astropy/pull/3362 """ ident1 = Identity(1) shift = Shift(1) rotation = Rotation2D(angle=90) model = ident1 & shift | rotation assert_allclose(model(1, 2), [-3.0, 1.0]) def test_invalid_operands(): """ Test that certain operators do not work with models whose inputs/outputs do not match up correctly. """ MESSAGE = r"Unsupported operands for |:.*" with pytest.raises(ModelDefinitionError, match=MESSAGE): Rotation2D(90) | Gaussian1D(1, 0, 0.1) MESSAGE = r"Both operands must match numbers of inputs and outputs" with pytest.raises(ModelDefinitionError, match=MESSAGE): Rotation2D(90) + Gaussian1D(1, 0, 0.1) @pytest.mark.parametrize("poly", [Chebyshev2D(1, 2), Polynomial2D(2), Legendre2D(1, 2)]) def test_compound_with_polynomials_2d(poly): """ Tests that polynomials are scaled when used in compound models. Issue #3699 """ poly.parameters = [1, 2, 3, 4, 1, 2] shift = Shift(3) model = poly | shift x, y = np.mgrid[:20, :37] result_compound = model(x, y) result = shift(poly(x, y)) assert_allclose(result, result_compound) def test_fix_inputs(): g1 = Gaussian2D(1, 0, 0, 1, 2) g2 = Gaussian2D(1.5, 0.5, -0.2, 0.5, 0.3) sg1_1 = fix_inputs(g1, {1: 0}) assert_allclose(sg1_1(0), g1(0, 0)) assert_allclose(sg1_1([0, 1, 3]), g1([0, 1, 3], [0, 0, 0])) sg1_2 = fix_inputs(g1, {"x": 1}) assert_allclose(sg1_2(1.5), g1(1, 1.5)) gg1 = g1 & g2 sgg1_1 = fix_inputs(gg1, {1: 0.1, 3: 0.2}) assert_allclose(sgg1_1(0, 0), gg1(0, 0.1, 0, 0.2)) sgg1_2 = fix_inputs(gg1, {"x0": -0.1, 2: 0.1}) assert_allclose(sgg1_2(1, 1), gg1(-0.1, 1, 0.1, 1)) assert_allclose(sgg1_2(y0=1, y1=1), gg1(-0.1, 1, 0.1, 1)) def test_fix_inputs_invalid(): g1 = Gaussian2D(1, 0, 0, 1, 2) MESSAGE = r"Substitution key .* not among possible input choices" with pytest.raises(ValueError, match=MESSAGE): fix_inputs(g1, {"x0": 0, 0: 0}) with pytest.raises(ValueError, match=MESSAGE): fix_inputs(g1, {3: 2}) with pytest.raises(ValueError, match=MESSAGE): fix_inputs(g1, {np.int32(3): 2}) with pytest.raises(ValueError, match=MESSAGE): fix_inputs(g1, {np.int64(3): 2}) with pytest.raises(ValueError, match=MESSAGE): fix_inputs(g1, {"w": 2}) MESSAGE = r'Expected a dictionary for second argument of "fix_inputs"' with pytest.raises(ValueError, match=MESSAGE): fix_inputs(g1, (0, 1)) MESSAGE = r".*Illegal operator: ', '#'.*" with pytest.raises(ModelDefinitionError, match=MESSAGE): CompoundModel("#", g1, g1) MESSAGE = r"Too many input arguments - expected 1, got 2" with pytest.raises(ValueError, match=MESSAGE): gg1 = fix_inputs(g1, {0: 1}) gg1(2, y=2) with pytest.raises(ValueError, match=MESSAGE): gg1 = fix_inputs(g1, {np.int32(0): 1}) gg1(2, y=2) with pytest.raises(ValueError, match=MESSAGE): gg1 = fix_inputs(g1, {np.int64(0): 1}) gg1(2, y=2) def test_fix_inputs_with_bounding_box(): g1 = Gaussian2D(1, 0, 0, 1, 1) g2 = Gaussian2D(1, 0, 0, 1, 1) assert g1.bounding_box == ((-5.5, 5.5), (-5.5, 5.5)) gg1 = g1 & g2 gg1.bounding_box = ((-5.5, 5.5), (-5.4, 5.4), (-5.3, 5.3), (-5.2, 5.2)) assert gg1.bounding_box == ((-5.5, 5.5), (-5.4, 5.4), (-5.3, 5.3), (-5.2, 5.2)) sg = fix_inputs(gg1, {0: 0, 2: 0}) assert sg.bounding_box == ((-5.5, 5.5), (-5.3, 5.3)) g1 = Gaussian1D(10, 3, 1) g = g1 & g1 g.bounding_box = ((1, 
4), (6, 8)) gf = fix_inputs(g, {0: 1}) assert gf.bounding_box == (1, 4) def test_indexing_on_instance(): """Test indexing on compound model instances.""" m = Gaussian1D(1, 0, 0.1) + Const1D(2) assert isinstance(m[0], Gaussian1D) assert isinstance(m[1], Const1D) assert m.param_names == ("amplitude_0", "mean_0", "stddev_0", "amplitude_1") # Test parameter equivalence assert m[0].amplitude == 1 == m.amplitude_0 assert m[0].mean == 0 == m.mean_0 assert m[0].stddev == 0.1 == m.stddev_0 assert m[1].amplitude == 2 == m.amplitude_1 # Test that parameter value updates are symmetric between the compound # model and the submodel returned by indexing const = m[1] m.amplitude_1 = 42 assert const.amplitude == 42 const.amplitude = 137 assert m.amplitude_1 == 137 # Similar couple of tests, but now where the compound model was created # from model instances g = Gaussian1D(1, 2, 3, name="g") p = Polynomial1D(2, name="p") m = g + p assert m[0].name == "g" assert m[1].name == "p" assert m["g"].name == "g" assert m["p"].name == "p" poly = m[1] m.c0_1 = 12345 assert poly.c0 == 12345 poly.c1 = 6789 assert m.c1_1 == 6789 # Test negative indexing assert isinstance(m[-1], Polynomial1D) assert isinstance(m[-2], Gaussian1D) MESSAGE = r"list index out of range" with pytest.raises(IndexError, match=MESSAGE): m[42] MESSAGE = r"No component with name 'foobar' found" with pytest.raises(IndexError, match=MESSAGE): m["foobar"] # Confirm index-by-name works with fix_inputs g = Gaussian2D(1, 2, 3, 4, 5, name="g") m = fix_inputs(g, {0: 1}) assert m["g"].name == "g" # Test string slicing A = Const1D(1.1, name="A") B = Const1D(2.1, name="B") C = Const1D(3.1, name="C") M = A + B * C assert_allclose(M["B":"C"](1), 6.510000000000001) class _ConstraintsTestA(Model): stddev = Parameter(default=0, min=0, max=0.3) mean = Parameter(default=0, fixed=True) @staticmethod def evaluate(stddev, mean): return stddev, mean class _ConstraintsTestB(Model): mean = Parameter(default=0, fixed=True) @staticmethod def evaluate(mean): return mean def test_inherit_constraints(): """ Various tests for copying of constraint values between compound models and their members. Regression test for https://github.com/astropy/astropy/issues/3481 """ model = Gaussian1D(bounds={"stddev": (0, 0.3)}, fixed={"mean": True}) + Gaussian1D( fixed={"mean": True} ) # Lots of assertions in this test as there are multiple interfaces to # parameter constraints assert "stddev_0" in model.bounds assert model.bounds["stddev_0"] == (0, 0.3) assert model.stddev_0.bounds == (0, 0.3) assert "mean_0" in model.fixed assert model.fixed["mean_0"] is True assert model.mean_0.fixed is True assert "mean_1" in model.fixed assert model.fixed["mean_1"] is True assert model.mean_1.fixed is True assert model.stddev_0 is model[0].stddev # Great, all the constraints were inherited properly # Now what about if we update them through the sub-models? 
model.stddev_0.bounds = (0, 0.4) assert model[0].stddev.bounds == (0, 0.4) assert model[0].bounds["stddev"] == (0, 0.4) model.stddev_0.bounds = (0.1, 0.5) assert model[0].stddev.bounds == (0.1, 0.5) assert model[0].bounds["stddev"] == (0.1, 0.5) model[1].mean.fixed = False assert model.mean_1.fixed is False assert model[1].mean.fixed is False # Now turn off syncing of constraints assert model.bounds["stddev_0"] == (0.1, 0.5) model.sync_constraints = False model[0].stddev.bounds = (0, 0.2) assert model.bounds["stddev_0"] == (0.1, 0.5) model.sync_constraints = True assert model.bounds["stddev_0"] == (0, 0.2) def test_compound_custom_inverse(): """ Test that a compound model with a custom inverse has that inverse applied when the inverse of another model, of which it is a component, is computed. Regression test for https://github.com/astropy/astropy/issues/3542 """ poly = Polynomial1D(1, c0=1, c1=2) scale = Scale(1) shift = Shift(1) model1 = poly | scale model1.inverse = poly # model1 now has a custom inverse (the polynomial itself, ignoring the # trivial scale factor) model2 = shift | model1 assert_allclose(model2.inverse(1), (poly | shift.inverse)(1)) # Make sure an inverse is not allowed if the models were combined with the # wrong operator, or if one of the models doesn't have an inverse defined MESSAGE = ( r"No analytical or user-supplied inverse transform has been implemented for" r" this model" ) with pytest.raises(NotImplementedError, match=MESSAGE): (shift + model1).inverse with pytest.raises(NotImplementedError, match=MESSAGE): (model1 & poly).inverse def test_pickle_compound(): """ Regression test for https://github.com/astropy/astropy/issues/3867#issuecomment-114547228 """ # Test pickling a compound model instance g1 = Gaussian1D(1.0, 0.0, 0.1) g2 = Gaussian1D([2.0, 3.0], [0.0, 0.0], [0.2, 0.3]) m = g1 + g2 m2 = pickle.loads(pickle.dumps(m)) assert m.param_names == m2.param_names assert m.__class__.__name__ == m2.__class__.__name__ assert np.all(m.parameters == m2.parameters) assert np.all(m(0) == m2(0)) def test_update_parameters(): offx = Shift(1) scl = Scale(2) m = offx | scl assert m(1) == 4 offx.offset = 42 assert m(1) == 86 m.factor_1 = 100 assert m(1) == 4300 m2 = m | offx assert m2(1) == 4342 def test_name(): offx = Shift(1) scl = Scale(2) m = offx | scl scl.name = "scale" assert m.submodel_names == ("None_0", "scale") assert m.name is None m.name = "M" assert m.name == "M" m1 = m.rename("M1") assert m.name == "M1" assert m1.name == "M1" def test_name_index(): g1 = Gaussian1D(1, 1, 1) g2 = Gaussian1D(1, 2, 1) g = g1 + g2 MESSAGE = r"No component with name 'bozo' found" with pytest.raises(IndexError, match=MESSAGE): g["bozo"] g1.name = "bozo" assert g["bozo"].mean == 1 g2.name = "bozo" MESSAGE = r"Multiple components found using 'bozo' as name.*" with pytest.raises(IndexError, match=MESSAGE): g["bozo"] @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_tabular_in_compound(): """ Issue #7411 - evaluate should not change the shape of the output. """ t = Tabular1D(points=([1, 5, 7],), lookup_table=[12, 15, 19], bounds_error=False) rot = Rotation2D(2) p = Polynomial1D(1) x = np.arange(12).reshape((3, 4)) # Create a compound model which does not execute Tabular.__call__, # but model.evaluate and is followed by a Rotation2D which # checks the exact shapes. 
model = p & t | rot x1, y1 = model(x, x) assert x1.ndim == 2 assert y1.ndim == 2 def test_bounding_box(): g = Gaussian2D() + Gaussian2D(2, 0.5, 0.1, 2, 3, 0) g.bounding_box = ((0, 1), (0, 0.5)) y, x = np.mgrid[0:10, 0:10] y = y / 3.0 x = x / 3.0 val = g(x, y, with_bounding_box=True) # fmt: off compare = np.array( [ [2.93738984, 2.93792011, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], [2.87857153, 2.88188761, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], [2.70492922, 2.71529265, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], [2.45969972, 2.47912103, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan] ] ) # fmt: on mask = ~np.isnan(val) assert_allclose(val[mask], compare[mask]) val2 = g(x + 2, y + 2, with_bounding_box=True) assert np.isnan(val2).sum() == 100 # val3 = g(.1, .1, with_bounding_box=True) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_bounding_box_with_units(): points = np.arange(5) * u.pix lt = np.arange(5) * u.AA t = Tabular1D(points, lt) assert t(1 * u.pix, with_bounding_box=True) == 1.0 * u.AA @pytest.mark.parametrize("poly", [Chebyshev1D(5), Legendre1D(5), Polynomial1D(5)]) def test_compound_with_polynomials_1d(poly): """ Tests that polynomials are offset when used in compound models. 
Issue #3699 """ poly.parameters = [1, 2, 3, 4, 1, 2] shift = Shift(3) model = poly | shift x = np.linspace(-5, 5, 10) result_compound = model(x) result = shift(poly(x)) assert_allclose(result, result_compound) assert model.param_names == ( "c0_0", "c1_0", "c2_0", "c3_0", "c4_0", "c5_0", "offset_1", ) def test_replace_submodel(): """ Replace a model in a Compound model """ S1 = Shift(2, name="shift2") | Scale(3, name="scale3") # First shift then scale S2 = Scale(2, name="scale2") | Shift(3, name="shift3") # First scale then shift m = S1 & S2 assert m(1, 2) == (9, 7) m2 = m.replace_submodel("scale3", Scale(4, name="scale4")) assert m2(1, 2) == (12, 7) assert m(1, 2) == (9, 7) # Check the inverse has been updated assert m2.inverse(12, 7) == (1, 2) # Produce the same result by replacing a single model with a compound m3 = m.replace_submodel("shift2", Shift(2) | Scale(2)) assert m(1, 2) == (9, 7) assert m3(1, 2) == (18, 7) # Check the inverse has been updated assert m3.inverse(18, 7) == (1, 2) # Test with arithmetic model compounding operator m = S1 + S2 assert m(1) == 14 m2 = m.replace_submodel("scale2", Scale(4, name="scale4")) assert m2(1) == 16 # Test with fix_inputs() R = fix_inputs(Rotation2D(angle=90, name="rotate"), {0: 1}) m4 = S1 | R assert_allclose(m4(0), (-6, 1)) m5 = m4.replace_submodel("rotate", Rotation2D(180)) assert_allclose(m5(0), (-1, -6)) # Check we get a ValueError when the model name doesn't exist MESSAGE = r"No submodels found named not_there" with pytest.raises(ValueError, match=MESSAGE): m2 = m.replace_submodel("not_there", Scale(2)) # And now a model set P = Polynomial1D(degree=1, n_models=2, name="poly") S = Shift([1, 2], n_models=2) m = P | S assert_array_equal(m([0, 1]), (1, 2)) MESSAGE = r"New and old models must have equal values for n_models" with pytest.raises(ValueError, match=MESSAGE): m2 = m.replace_submodel("poly", Polynomial1D(degree=1, c0=1)) m2 = m.replace_submodel("poly", Polynomial1D(degree=1, c0=[1, 2], n_models=2)) assert_array_equal(m2([0, 1]), (2, 4)) # Ensure previous _user_inverse doesn't stick around S1 = Shift(1) S2 = Shift(2) S3 = Shift(3, name="S3") S23 = S2 | S3 S23.inverse = Shift(-4.9) m = S1 & S23 # This should delete the S23._user_inverse m2 = m.replace_submodel("S3", Shift(4)) assert m2(1, 2) == (2, 8) assert m2.inverse(2, 8) == (1, 2) @pytest.mark.parametrize( "expr", [ lambda m1, m2: m1 + m2, lambda m1, m2: m1 - m2, lambda m1, m2: m1 * m2, lambda m1, m2: m1 / m2, ], ) def test_compound_evaluate(expr): """ Tests that compound evaluate function produces the same result as the models with the operator applied """ x = np.linspace(-5, 5, 10) # Some evaluate functions (including Const1D's) assume that inputs are numpy arrays or quantities p1 = np.array([1, 2, 3, 4, 1, 2]) p2 = np.array([1, 0, 0.5]) model1 = Polynomial1D(5) model2 = Gaussian1D(2, 1, 5) compound = expr(model1, model2) assert_array_equal( compound.evaluate(x, *p1, *p2), expr(model1.evaluate(x, *p1), model2.evaluate(x, *p2)), ) def test_compound_evaluate_power(): """ Tests that compound evaluate function produces the same result as the models with the power operator applied """ x = np.linspace(-5, 5, 10) p1 = np.array([1, 0, 0.2]) p2 = np.array([3]) model1 = Gaussian1D(2, 1, 5) model2 = Const1D(2) compound = model1**model2 assert_array_equal( compound.evaluate(x, *p1, *p2), model1.evaluate(x, *p1) ** model2.evaluate(x, *p2), ) def test_compound_evaluate_double_shift(): x = np.linspace(-5, 5, 10) y = np.linspace(-5, 5, 10) m1 = Gaussian2D(1, 0, 0, 1, 1, 1) m2 = Shift(1) m3 = Shift(2) m =
Gaussian2D(1, 0, 0, 1, 1, 1) & Shift(1) & Shift(2) assert_array_equal( m.evaluate(x, y, x - 10, y + 20, 1, 0, 0, 1, 1, 1, 1, 2), [ m1.evaluate(x, y, 1, 0, 0, 1, 1, 1), m2.evaluate(x - 10, 1), m3.evaluate(y + 20, 2), ], ) @pytest.mark.parametrize( "expr", [ lambda m1, m2: m1 + m2, lambda m1, m2: m1 - m2, lambda m1, m2: m1 * m2, lambda m1, m2: m1 / m2, ], ) def test_compound_evaluate_named_param(expr): """ Tests that compound evaluate function produces the same result as the models with the operator applied """ x = np.linspace(-5, 5, 10) p1 = np.array([1, 0, 0.2]) p2 = np.array([3, 0.5, 0.5]) model1 = Gaussian1D(2, 1, 5) model2 = Gaussian1D(2, 1, 5) compound = expr(model1, model2) assert_array_equal( compound.evaluate(x, *p2, amplitude_0=p1[0], mean_0=p1[1], stddev_0=p1[2]), expr(model1.evaluate(x, *p1), model2.evaluate(x, *p2)), ) def test_compound_evaluate_name_param_power(): """ Tests that compound evaluate function produces the same result as the models with the power operator applied """ x = np.linspace(-5, 5, 10) p1 = np.array([1, 0, 0.2]) p2 = np.array([3]) model1 = Gaussian1D(2, 1, 5) model2 = Const1D(2) compound = model1**model2 assert_array_equal( compound.evaluate(x, *p2, amplitude_0=p1[0], mean_0=p1[1], stddev_0=p1[2]), model1.evaluate(x, *p1) ** model2.evaluate(x, *p2), ) def test_compound_evaluate_and(): """ Tests that compound evaluate function produces the same result as the models with the operator applied """ x = np.linspace(-5, 5, 10) p1 = np.array([1, 0.1, 0.5]) p2 = np.array([3]) model1 = Gaussian1D() model2 = Shift() compound = model1 & model2 assert_array_equal( compound.evaluate(x, x, *p1, p2), [model1.evaluate(x, *p1), model2.evaluate(x, p2)], ) def test_compound_evaluate_or(): """ Tests that compound evaluate function produces the same result as the models with the operator applied """ x = np.linspace(-5, 5, 10) p1 = np.array([0.5]) p2_amplitude = np.array([3]) p2_mean = np.array([0]) p2_std = np.array([0.1]) model1 = Shift(0.5) model2 = Gaussian1D(1, 0, 0.5) compound = model1 | model2 assert_array_equal( compound.evaluate(x, p1, p2_amplitude, p2_mean, p2_std), model2.evaluate(model1.evaluate(x, p1), p2_amplitude, p2_mean, p2_std), ) def test_compound_evaluate_fix_inputs_by_keyword(): """ Tests that compound evaluate function produces the same result as the models fix_inputs operator is applied when using the keyword """ y, x = np.mgrid[:10, :10] model_params = [3, 0, 0.1, 1, 0.5, 0] model = Gaussian2D(1, 2, 0, 0.5) compound = fix_inputs(model, {"x": x + 5}) assert_array_equal( compound.evaluate(x, y, *model_params), model.evaluate(x + 5, y, *model_params), ) def test_compound_evaluate_fix_inputs_by_position(): """ Tests that compound evaluate function produces the same result as the models fix_inputs operator is applied when using the input index """ y, x = np.mgrid[:10, :10] model_params = [3, 0, 0.1, 1, 0.5, 0] model = Gaussian2D(1, 2, 0, 0.5) compound = fix_inputs(model, {0: x + 5}) assert_array_equal( compound.evaluate(x, y, *model_params), model.evaluate(x + 5, y, *model_params), ) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_fit_multiplied_compound_model_with_mixed_units(): """ Regression test for issue #12320 """ fitter = LevMarLSQFitter() x = np.linspace(0, 1, 101) * u.s y = np.linspace(5, 10, 101) * u.m * u.kg / u.s m1 = Linear1D(slope=5 * u.m / u.s / u.s, intercept=1.0 * u.m / u.s) m2 = Linear1D(slope=0.0 * u.kg / u.s, intercept=10.0 * u.kg) truth = m1 * m2 fit = fitter(truth, x, y) unfit_output = truth(x) fit_output = fit(x) 
assert unfit_output.unit == fit_output.unit == (u.kg * u.m / u.s) assert_allclose(unfit_output, fit_output) for name in truth.param_names: assert getattr(truth, name) == getattr(fit, name) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_fit_multiplied_recursive_compound_model_with_mixed_units(): """ Regression test for issue #12320 """ fitter = LevMarLSQFitter() x = np.linspace(0, 1, 101) * u.s y = np.linspace(5, 10, 101) * u.m * u.m * u.kg / u.s m1 = Linear1D(slope=5 * u.m / u.s / u.s, intercept=1.0 * u.m / u.s) m2 = Linear1D(slope=0.0 * u.kg / u.s, intercept=10.0 * u.kg) m3 = Linear1D(slope=0.0 * u.m / u.s, intercept=10.0 * u.m) truth = m1 * m2 * m3 fit = fitter(truth, x, y) unfit_output = truth(x) fit_output = fit(x) assert unfit_output.unit == fit_output.unit == (u.kg * u.m * u.m / u.s) assert_allclose(unfit_output, fit_output) for name in truth.param_names: assert getattr(truth, name) == getattr(fit, name) x = np.linspace(0, 1, 101) * u.s y = np.linspace(5, 10, 101) * u.m * u.m * u.kg * u.kg / u.s m1 = Linear1D(slope=5 * u.m / u.s / u.s, intercept=1.0 * u.m / u.s) m2 = Linear1D(slope=0.0 * u.kg / u.s, intercept=10.0 * u.kg) m3 = Linear1D(slope=0.0 * u.m / u.s, intercept=10.0 * u.m) m4 = Linear1D(slope=0.0 * u.kg / u.s, intercept=10.0 * u.kg) m11 = m1 * m2 m22 = m3 * m4 truth = m11 * m22 fit = fitter(truth, x, y) unfit_output = truth(x) fit_output = fit(x) assert unfit_output.unit == fit_output.unit == (u.kg * u.kg * u.m * u.m / u.s) assert_allclose(unfit_output, fit_output) for name in truth.param_names: assert getattr(truth, name) == getattr(fit, name) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_fit_divided_compound_model_with_mixed_units(): """ Regression test for issue #12320 """ fitter = LevMarLSQFitter() x = np.linspace(0, 1, 101) * u.s y = np.linspace(5, 10, 101) * u.kg * u.m / u.s m1 = Linear1D(slope=5 * u.kg * u.m / u.s, intercept=1.0 * u.kg * u.m) m2 = Linear1D(slope=0.0 * u.s / u.s, intercept=10.0 * u.s) truth = m1 / m2 fit = fitter(truth, x, y) unfit_output = truth(x) fit_output = fit(x) assert unfit_output.unit == fit_output.unit == (u.kg * u.m / u.s) assert_allclose(unfit_output, fit_output) for name in truth.param_names: assert getattr(truth, name) == getattr(fit, name) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_fit_mixed_recursive_compound_model_with_mixed_units(): """ Regression test for issue #12320 """ fitter = LevMarLSQFitter() x = np.linspace(0, 1, 101) * u.s y = np.linspace(5, 10, 101) * u.kg * u.m * u.m / u.s m1 = Linear1D(slope=5 * u.kg * u.m / u.s, intercept=1.0 * u.kg * u.m) m2 = Linear1D(slope=0.0 * u.s / u.s, intercept=10.0 * u.s) m3 = Linear1D(slope=0.0 * u.m / u.s, intercept=10.0 * u.m) truth = m1 / m2 * m3 fit = fitter(truth, x, y) unfit_output = truth(x) fit_output = fit(x) assert unfit_output.unit == fit_output.unit == (u.kg * u.m * u.m / u.s) assert_allclose(unfit_output, fit_output) for name in truth.param_names: assert getattr(truth, name) == getattr(fit, name) x = np.linspace(0, 1, 101) * u.s y = np.linspace(5, 10, 101) * u.kg * u.kg * u.m * u.m / u.s m1 = Linear1D(slope=5 * u.kg * u.m / u.s, intercept=1.0 * u.kg * u.m) m2 = Linear1D(slope=0.0 * u.s / u.s, intercept=10.0 * u.s) m3 = Linear1D(slope=0.0 * u.m / u.s, intercept=10.0 * u.m) m4 = Linear1D(slope=0.0 * u.kg / u.s, intercept=10.0 * u.kg) m11 = m1 / m2 m22 = m3 * m4 truth = m11 * m22 fit = fitter(truth, x, y) unfit_output = truth(x) fit_output = fit(x) assert unfit_output.unit == fit_output.unit == (u.kg * u.kg * u.m * 
u.m / u.s) assert_allclose(unfit_output, fit_output) for name in truth.param_names: assert getattr(truth, name) == getattr(fit, name)
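# Illustrative sketch (not an original regression test): the #12320 tests
# above all reduce to the rule that a product of models carries the product
# of the component output units. A minimal, hedged demonstration with
# locally chosen names and values:
def example_multiplied_units_sketch():
    import astropy.units as u
    from astropy.modeling.models import Linear1D

    m_a = Linear1D(slope=1 * u.m / u.s, intercept=0 * u.m)
    m_b = Linear1D(slope=1 * u.kg / u.s, intercept=0 * u.kg)
    out = (m_a * m_b)(2 * u.s)
    # (1 m/s * 2 s) * (1 kg/s * 2 s) == 4 m kg
    assert out.unit == u.m * u.kg
    assert out.value == 4.0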
# Licensed under a 3-clause BSD style license - see LICENSE.rst # pylint: disable=invalid-name import numpy as np import pytest from astropy.convolution import convolve_models_fft from astropy.modeling.models import Const1D, Const2D from astropy.utils.compat.optional_deps import HAS_SCIPY
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_clear_cache(): m1 = Const1D() m2 = Const1D() model = convolve_models_fft(m1, m2, (-1, 1), 0.01) assert model._kwargs is None assert model._convolution is None results = model(0) assert np.all(results == np.array([1.0])) assert model._kwargs is not None assert model._convolution is not None model.clear_cache() assert model._kwargs is None assert model._convolution is None
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_input_shape_1d(): m1 = Const1D() m2 = Const1D() model = convolve_models_fft(m1, m2, (-1, 1), 0.01) results = model(0) assert results.shape == (1,) x = np.arange(-1, 1, 0.1) results = model(x) assert results.shape == x.shape
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_input_shape_2d(): m1 = Const2D() m2 = Const2D() model = convolve_models_fft(m1, m2, ((-1, 1), (-1, 1)), 0.01) results = model(0, 0) assert results.shape == (1,) x = np.arange(-1, 1, 0.1) results = model(x, 0) assert results.shape == x.shape results = model(0, x) assert results.shape == x.shape grid = np.meshgrid(x, x) results = model(*grid) assert results.shape == grid[0].shape assert results.shape == grid[1].shape
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test__convolution_inputs(): m1 = Const2D() m2 = Const2D() model = convolve_models_fft(m1, m2, ((-1, 1), (-1, 1)), 0.01) x = np.arange(-1, 1, 0.1) y = np.arange(-2, 2, 0.1) grid0 = np.meshgrid(x, x) grid1 = np.meshgrid(y, y) # scalar inputs assert (np.array([1]), (1,)) == model._convolution_inputs(1) # Multiple inputs assert np.all( model._convolution_inputs(*grid0)[0] == np.reshape([grid0[0], grid0[1]], (2, -1)).T ) assert model._convolution_inputs(*grid0)[1] == grid0[0].shape assert np.all( model._convolution_inputs(*grid1)[0] == np.reshape([grid1[0], grid1[1]], (2, -1)).T ) assert model._convolution_inputs(*grid1)[1] == grid1[0].shape # Error with pytest.raises(ValueError, match=r"Values have differing shapes"): model._convolution_inputs(grid0[0], grid1[1])
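# Usage sketch (hedged, illustrative only): the API surface the tests above
# rely on, in one place. convolve_models_fft takes the two models, a bounding
# region, and a resolution for the FFT grid; the returned model lazily builds
# and caches its convolution until clear_cache() is called. Requires scipy.
def example_convolve_models_fft_usage():
    m1 = Const1D()
    m2 = Const1D()
    model = convolve_models_fft(m1, m2, (-1, 1), 0.01)
    value = model(0)  # first evaluation builds and caches the convolution
    model.clear_cache()  # resets the cached _kwargs/_convolution to None
    return value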
# Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np import pytest from astropy import units as u from astropy.modeling.core import Model, fix_inputs from astropy.modeling.models import Polynomial1D class _ExampleModel(Model): n_inputs = 1 n_outputs = 1 def __init__(self): self._input_units = {"x": u.m} self._return_units = {"y": u.m / u.s} super().__init__() def evaluate(self, input): return input / u.Quantity(1, u.s) def _models_with_units(): m1 = _ExampleModel() & _ExampleModel() m2 = _ExampleModel() + _ExampleModel() p = Polynomial1D(1) p._input_units = {"x": u.m / u.s} p._return_units = {"y": u.m / u.s} m3 = _ExampleModel() | p m4 = fix_inputs(m1, {"x0": 1}) m5 = fix_inputs(m1, {0: 1}) models = [m1, m2, m3, m4, m5] input_units = [ {"x0": u.Unit("m"), "x1": u.Unit("m")}, {"x": u.Unit("m")}, {"x": u.Unit("m")}, {"x1": u.Unit("m")}, {"x1": u.Unit("m")}, ] return_units = [ {"y0": u.Unit("m / s"), "y1": u.Unit("m / s")}, {"y": u.Unit("m / s")}, {"y": u.Unit("m / s")}, {"y0": u.Unit("m / s"), "y1": u.Unit("m / s")}, {"y0": u.Unit("m / s"), "y1": u.Unit("m / s")}, ] return np.array([models, input_units, return_units], dtype=object).T @pytest.mark.parametrize(("model", "input_units", "return_units"), _models_with_units()) def test_input_units(model, input_units, return_units): """Test input_units on various compound models.""" assert model.input_units == input_units assert model.return_units == return_units
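# A short sketch of the introspection checked by the parametrized test above:
# compound models report per-input and per-output units keyed by their input
# and output labels. This mirrors the m1 case from _models_with_units(); the
# function name is a local, illustrative choice.
def example_input_units_sketch():
    m = _ExampleModel() & _ExampleModel()
    assert m.input_units == {"x0": u.m, "x1": u.m}
    assert m.return_units == {"y0": u.m / u.s, "y1": u.m / u.s}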
# Licensed under a 3-clause BSD style license - see LICENSE.rst # pylint: disable=invalid-name import os import subprocess import sys import unittest.mock as mk from inspect import signature import numpy as np import pytest from numpy.testing import assert_allclose, assert_equal import astropy import astropy.modeling.core as core import astropy.units as u from astropy.convolution import convolve_models from astropy.modeling import models from astropy.modeling.bounding_box import CompoundBoundingBox, ModelBoundingBox from astropy.modeling.core import ( SPECIAL_OPERATORS, CompoundModel, Model, _add_special_operator, bind_bounding_box, bind_compound_bounding_box, custom_model, fix_inputs, ) from astropy.modeling.parameters import Parameter from astropy.modeling.separable import separability_matrix from astropy.tests.helper import assert_quantity_allclose from astropy.utils.compat.optional_deps import HAS_SCIPY
class NonFittableModel(Model): """An example class directly subclassing Model for testing.""" a = Parameter() def __init__(self, a, model_set_axis=None): super().__init__(a, model_set_axis=model_set_axis) @staticmethod def evaluate(): pass
def test_Model_instance_repr_and_str(): m = NonFittableModel(42.5) assert repr(m) == "<NonFittableModel(a=42.5)>" assert ( str(m) == "Model: NonFittableModel\n" "Inputs: ()\n" "Outputs: ()\n" "Model set size: 1\n" "Parameters:\n" " a \n" " ----\n" " 42.5" ) assert len(m) == 1
def test_Model_array_parameter(): model = models.Gaussian1D(4, 2, 1) assert_allclose(model.param_sets, [[4], [2], [1]])
def test_inputless_model(): """ Regression test for https://github.com/astropy/astropy/pull/3772#issuecomment-101821641 """ class TestModel(Model): n_outputs = 1 a = Parameter() @staticmethod def evaluate(a): return a m = TestModel(1) assert m.a == 1 assert m() == 1 # Test array-like output m = TestModel([1, 2, 3], model_set_axis=False) assert len(m) == 1 assert np.all(m() == [1, 2, 3]) # Test a model set m = TestModel(a=[1, 2, 3], model_set_axis=0) assert len(m) == 3 assert np.all(m() == [1, 2, 3]) # Test a model set m = TestModel(a=[[1, 2, 3], [4, 5, 6]], model_set_axis=0) assert len(m) == 2 assert np.all(m() == [[1, 2, 3], [4, 5, 6]]) # Test a model set m = TestModel(a=[[1, 2, 3], [4, 5, 6]], model_set_axis=np.int64(0)) assert len(m) == 2 assert np.all(m() == [[1, 2, 3], [4, 5, 6]])
def test_ParametricModel(): MESSAGE = r"Gaussian1D.__init__.* got an unrecognized parameter 'wrong'" with pytest.raises(TypeError, match=MESSAGE): models.Gaussian1D(1, 2, 3, wrong=4)
def test_custom_model_signature(): """ Tests that the signatures for the __init__ and __call__ methods of custom models are useful.
""" @custom_model def model_a(x): return x assert model_a.param_names == () assert model_a.n_inputs == 1 sig = signature(model_a.__init__) assert list(sig.parameters.keys()) == ["self", "args", "meta", "name", "kwargs"] sig = signature(model_a.__call__) assert list(sig.parameters.keys()) == [ "self", "inputs", "model_set_axis", "with_bounding_box", "fill_value", "equivalencies", "inputs_map", "new_inputs", ] @custom_model def model_b(x, a=1, b=2): return x + a + b assert model_b.param_names == ("a", "b") assert model_b.n_inputs == 1 sig = signature(model_b.__init__) assert list(sig.parameters.keys()) == ["self", "a", "b", "kwargs"] assert [x.default for x in sig.parameters.values()] == [sig.empty, 1, 2, sig.empty] sig = signature(model_b.__call__) assert list(sig.parameters.keys()) == [ "self", "inputs", "model_set_axis", "with_bounding_box", "fill_value", "equivalencies", "inputs_map", "new_inputs", ] @custom_model def model_c(x, y, a=1, b=2): return x + y + a + b assert model_c.param_names == ("a", "b") assert model_c.n_inputs == 2 sig = signature(model_c.__init__) assert list(sig.parameters.keys()) == ["self", "a", "b", "kwargs"] assert [x.default for x in sig.parameters.values()] == [sig.empty, 1, 2, sig.empty] sig = signature(model_c.__call__) assert list(sig.parameters.keys()) == [ "self", "inputs", "model_set_axis", "with_bounding_box", "fill_value", "equivalencies", "inputs_map", "new_inputs", ] def test_custom_model_subclass(): """Test that custom models can be subclassed.""" @custom_model def model_a(x, a=1): return x * a class model_b(model_a): # Override the evaluate from model_a @classmethod def evaluate(cls, x, a): return -super().evaluate(x, a) b = model_b() assert b.param_names == ("a",) assert b.a == 1 assert b(1) == -1 sig = signature(model_b.__init__) assert list(sig.parameters.keys()) == ["self", "a", "kwargs"] sig = signature(model_b.__call__) assert list(sig.parameters.keys()) == [ "self", "inputs", "model_set_axis", "with_bounding_box", "fill_value", "equivalencies", "inputs_map", "new_inputs", ] def test_custom_model_parametrized_decorator(): """Tests using custom_model as a decorator with parameters.""" def cosine(x, amplitude=1): return [amplitude * np.cos(x)] @custom_model(fit_deriv=cosine) def sine(x, amplitude=1): return amplitude * np.sin(x) assert issubclass(sine, Model) s = sine(2) assert_allclose(s(np.pi / 2), 2) assert_allclose(s.fit_deriv(0, 2), 2) def test_custom_model_n_outputs(): """ Test creating a custom_model which has more than one output, which requires special handling. Demonstrates issue #11791's ``n_outputs`` error has been solved """ @custom_model def model(x, y, n_outputs=2): return x + 1, y + 1 m = model() assert not isinstance(m.n_outputs, Parameter) assert isinstance(m.n_outputs, int) assert m.n_outputs == 2 assert m.outputs == ("x0", "x1") assert ( separability_matrix(m) == [ [True, True], [True, True], ] ).all() @custom_model def model(x, y, z, n_outputs=3): return x + 1, y + 1, z + 1 m = model() assert not isinstance(m.n_outputs, Parameter) assert isinstance(m.n_outputs, int) assert m.n_outputs == 3 assert m.outputs == ("x0", "x1", "x2") assert ( separability_matrix(m) == [ [True, True, True], [True, True, True], [True, True, True], ] ).all() def test_custom_model_settable_parameters(): """ Test creating a custom_model which specifically sets adjustable model parameters. Demonstrates part of issue #11791's notes about what passed parameters should/shouldn't be allowed. 
In this case, settable parameters should be allowed to have defaults set. """ @custom_model def model(x, y, n_outputs=2, bounding_box=((1, 2), (3, 4))): return x + 1, y + 1 m = model() assert m.n_outputs == 2 assert m.bounding_box == ((1, 2), (3, 4)) m.bounding_box = ((9, 10), (11, 12)) assert m.bounding_box == ((9, 10), (11, 12)) m = model(bounding_box=((5, 6), (7, 8))) assert m.n_outputs == 2 assert m.bounding_box == ((5, 6), (7, 8)) m.bounding_box = ((9, 10), (11, 12)) assert m.bounding_box == ((9, 10), (11, 12)) @custom_model def model(x, y, n_outputs=2, outputs=("z0", "z1")): return x + 1, y + 1 m = model() assert m.n_outputs == 2 assert m.outputs == ("z0", "z1") m.outputs = ("a0", "a1") assert m.outputs == ("a0", "a1") m = model(outputs=("w0", "w1")) assert m.n_outputs == 2 assert m.outputs == ("w0", "w1") m.outputs = ("a0", "a1") assert m.outputs == ("a0", "a1")
def test_custom_model_rejected_parameters(): """ Test creating a custom_model which attempts to override non-overridable parameters. Demonstrates part of issue #11791's discussion of which passed parameters should and shouldn't be allowed. In this case, non-settable parameters should raise an error rather than cause unexpected behavior. """ with pytest.raises( ValueError, match=r"Parameter 'n_inputs' cannot be a model property: *" ): @custom_model def model1(x, y, n_outputs=2, n_inputs=3): return x + 1, y + 1 with pytest.raises( ValueError, match=r"Parameter 'uses_quantity' cannot be a model property: *" ): @custom_model def model2(x, y, n_outputs=2, uses_quantity=True): return x + 1, y + 1
def test_custom_inverse(): """Test setting a custom inverse on a model.""" p = models.Polynomial1D(1, c0=-2, c1=3) # A trivial inverse for a trivial polynomial inv = models.Polynomial1D(1, c0=(2.0 / 3.0), c1=(1.0 / 3.0)) MESSAGE = ( r"No analytical or user-supplied inverse transform has been implemented for" r" this model" ) with pytest.raises(NotImplementedError, match=MESSAGE): p.inverse p.inverse = inv x = np.arange(100) assert_allclose(x, p(p.inverse(x))) assert_allclose(x, p.inverse(p(x))) p.inverse = None with pytest.raises(NotImplementedError, match=MESSAGE): p.inverse
def test_custom_inverse_reset(): """Test resetting a custom inverse to the model's default inverse.""" class TestModel(Model): n_inputs = 0 outputs = ("y",) @property def inverse(self): return models.Shift() @staticmethod def evaluate(): return 0 # The above test model has no meaning, nor does its inverse--this just # tests that setting an inverse and resetting to the default inverse works m = TestModel() assert isinstance(m.inverse, models.Shift) m.inverse = models.Scale() assert isinstance(m.inverse, models.Scale) del m.inverse assert isinstance(m.inverse, models.Shift)
def test_render_model_2d(): imshape = (71, 141) image = np.zeros(imshape) coords = y, x = np.indices(imshape) model = models.Gaussian2D(x_stddev=6.1, y_stddev=3.9, theta=np.pi / 3) # test points for edges ye, xe = [0, 35, 70], [0, 70, 140] # test points for floating point positions yf, xf = [35.1, 35.5, 35.9], [70.1, 70.5, 70.9] test_pts = [(a, b) for a in xe for b in ye] test_pts += [(a, b) for a in xf for b in yf] for x0, y0 in test_pts: model.x_mean = x0 model.y_mean = y0 expected = model(x, y) for xy in [coords, None]: for im in [image.copy(), None]: if (im is None) & (xy is None): # this case is tested in Fittable2DModelTester continue actual = model.render(out=im, coords=xy) if im is None: assert_allclose(actual, model.render(coords=xy)) # assert images match assert_allclose(expected, actual, atol=3e-7) # assert model fully captured if (x0, y0) == (70, 35): boxed = model.render() flux = np.sum(expected) assert ((flux - np.sum(boxed)) / flux) < 1e-7 # assert an error is raised when the bounding box is larger than the input array with pytest.raises(ValueError): model.render(out=np.zeros((1, 1)))
def test_render_model_1d(): npix = 101 image = np.zeros(npix) coords = np.arange(npix) model = models.Gaussian1D() # test points test_pts = [0, 49.1, 49.5, 49.9, 100] # test widths test_stdv = np.arange(5.5, 6.7, 0.2) for x0, stdv in [(p, s) for p in test_pts for s in test_stdv]: model.mean = x0 model.stddev = stdv expected = model(coords) for x in [coords, None]: for im in [image.copy(), None]: if (im is None) & (x is None): # this case is tested in Fittable1DModelTester continue actual = model.render(out=im, coords=x) # assert images match assert_allclose(expected, actual, atol=3e-7) # assert model fully captured if (x0, stdv) == (49.5, 5.5): boxed = model.render() flux = np.sum(expected) assert ((flux - np.sum(boxed)) / flux) < 1e-7
@pytest.mark.filterwarnings("ignore:invalid value encountered in less") def test_render_model_3d(): imshape = (17, 21, 27) image = np.zeros(imshape) coords = np.indices(imshape) def ellipsoid(x, y, z, x0=13.0, y0=10.0, z0=8.0, a=4.0, b=3.0, c=2.0, amp=1.0): rsq = ((x - x0) / a) ** 2 + ((y - y0) / b) ** 2 + ((z - z0) / c) ** 2 val = (rsq < 1) * amp return val class Ellipsoid3D(custom_model(ellipsoid)): @property def bounding_box(self): return ( (self.z0 - self.c, self.z0 + self.c), (self.y0 - self.b, self.y0 + self.b), (self.x0 - self.a, self.x0 + self.a), ) model = Ellipsoid3D() # test points for edges ze, ye, xe = [0, 8, 16], [0, 10, 20], [0, 13, 26] # test points for floating point positions zf, yf, xf = [8.1, 8.5, 8.9], [10.1, 10.5, 10.9], [13.1, 13.5, 13.9] test_pts = [(x, y, z) for x in xe for y in ye for z in ze] test_pts += [(x, y, z) for x in xf for y in yf for z in zf] for x0, y0, z0 in test_pts: model.x0 = x0 model.y0 = y0 model.z0 = z0 expected = model(*coords[::-1]) for c in [coords, None]: for im in [image.copy(), None]: if (im is None) & (c is None): continue actual = model.render(out=im, coords=c) # assert images match assert_allclose(expected, actual) # assert model fully captured if (z0, y0, x0) == (8, 10, 13): boxed = model.render() assert (np.sum(expected) - np.sum(boxed)) == 0
def test_render_model_out_dtype(): """Test different out.dtype for model.render.""" MESSAGE = ( r"Cannot cast ufunc 'add' output from .* to .* with casting rule 'same_kind" ) for model in [models.Gaussian2D(), models.Gaussian2D() + models.Planar2D()]: for dtype in [np.float64, np.float32, np.complex64]: im = np.zeros((40, 40), dtype=dtype) imout = model.render(out=im) assert imout is im assert imout.sum() != 0 with pytest.raises(TypeError, match=MESSAGE): im = np.zeros((40, 40), dtype=np.int32) imout = model.render(out=im)
def test_custom_bounding_box_1d(): """ Tests that the bounding_box setter works.
""" # 1D models g1 = models.Gaussian1D() bb = g1.bounding_box expected = g1.render() # assign the same bounding_box, now through the bounding_box setter g1.bounding_box = bb assert_allclose(g1.render(), expected) # 2D models g2 = models.Gaussian2D() bb = g2.bounding_box expected = g2.render() # assign the same bounding_box, now through the bounding_box setter g2.bounding_box = bb assert_allclose(g2.render(), expected) def test_n_submodels_in_single_models(): assert models.Gaussian1D().n_submodels == 1 assert models.Gaussian2D().n_submodels == 1 def test_compound_deepcopy(): model = (models.Gaussian1D(10, 2, 3) | models.Shift(2)) & models.Rotation2D(21.3) new_model = model.deepcopy() assert id(model) != id(new_model) assert id(model._leaflist) != id(new_model._leaflist) assert id(model[0]) != id(new_model[0]) assert id(model[1]) != id(new_model[1]) assert id(model[2]) != id(new_model[2]) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_units_with_bounding_box(): points = np.arange(10, 20) table = np.arange(10) * u.Angstrom t = models.Tabular1D(points, lookup_table=table) assert isinstance(t(10), u.Quantity) assert isinstance(t(10, with_bounding_box=True), u.Quantity) assert_quantity_allclose(t(10), t(10, with_bounding_box=True)) RENAMED_MODEL = models.Gaussian1D.rename("CustomGaussian") MODEL_RENAME_CODE = """ from astropy.modeling.models import Gaussian1D print(repr(Gaussian1D)) print(repr(Gaussian1D.rename('CustomGaussian'))) """.strip() MODEL_RENAME_EXPECTED = b""" <class 'astropy.modeling.functional_models.Gaussian1D'> Name: Gaussian1D N_inputs: 1 N_outputs: 1 Fittable parameters: ('amplitude', 'mean', 'stddev') <class '__main__.CustomGaussian'> Name: CustomGaussian (Gaussian1D) N_inputs: 1 N_outputs: 1 Fittable parameters: ('amplitude', 'mean', 'stddev') """.strip() def test_rename_path(tmp_path): # Regression test for a bug that caused the path to the class to be # incorrect in a renamed model's __repr__. assert ( repr(RENAMED_MODEL).splitlines()[0] == "<class 'astropy.modeling.tests.test_core.CustomGaussian'>" ) # Make sure that when called from a user script, the class name includes # __main__. 
env = os.environ.copy() paths = [os.path.dirname(astropy.__path__[0])] + sys.path env["PYTHONPATH"] = os.pathsep.join(paths) script = tmp_path / "rename.py" with open(script, "w") as f: f.write(MODEL_RENAME_CODE) output = subprocess.check_output([sys.executable, script], env=env) assert output.splitlines() == MODEL_RENAME_EXPECTED.splitlines() @pytest.mark.parametrize( "model_class", [models.Gaussian1D, models.Polynomial1D, models.Shift, models.Tabular1D], ) def test_rename_1d(model_class): new_model = model_class.rename(name="Test1D") assert new_model.name == "Test1D" @pytest.mark.parametrize( "model_class", [models.Gaussian2D, models.Polynomial2D, models.Tabular2D] ) def test_rename_2d(model_class): new_model = model_class.rename(name="Test2D") assert new_model.name == "Test2D" def test_fix_inputs_integer(): """ Tests that numpy integers can be passed as dictionary keys to fix_inputs Issue #11358 """ m = models.Identity(2) mf = models.fix_inputs(m, {1: 22}) assert mf(1) == (1, 22) mf_int32 = models.fix_inputs(m, {np.int32(1): 33}) assert mf_int32(1) == (1, 33) mf_int64 = models.fix_inputs(m, {np.int64(1): 44}) assert mf_int64(1) == (1, 44) def test_fix_inputs_empty_dict(): """ Tests that empty dictionary can be passed to fix_inputs Issue #11355 """ m = models.Identity(2) mf = models.fix_inputs(m, {}) assert mf(1, 2) == (1, 2) def test_rename_inputs_outputs(): g2 = models.Gaussian2D(10, 2, 3, 1, 2) assert g2.inputs == ("x", "y") assert g2.outputs == ("z",) MESSAGE = r"Expected .* number of .*, got .*" with pytest.raises(ValueError, match=MESSAGE): g2.inputs = ("w",) with pytest.raises(ValueError, match=MESSAGE): g2.outputs = ("w", "e") def test__prepare_output_single_model(): model = models.Gaussian1D() # No broadcast assert ( np.array([1, 2]) == model._prepare_output_single_model(np.array([1, 2]), None) ).all() # Broadcast to scalar assert 1 == model._prepare_output_single_model(np.array([1]), ()) assert 2 == model._prepare_output_single_model(np.asanyarray(2), ()) # Broadcast reshape output = np.array([[1, 2, 3], [4, 5, 6]]) reshape = np.array([[1, 2], [3, 4], [5, 6]]) assert (output == model._prepare_output_single_model(output, (2, 3))).all() assert (reshape == model._prepare_output_single_model(output, (3, 2))).all() # Broadcast reshape scalar assert 1 == model._prepare_output_single_model(np.array([1]), (1, 2)) assert 2 == model._prepare_output_single_model(np.asanyarray(2), (3, 4)) # Fail to broadcast assert (output == model._prepare_output_single_model(output, (1, 2))).all() assert (output == model._prepare_output_single_model(output, (3, 4))).all() def test_prepare_outputs_mixed_broadcast(): """ Tests that _prepare_outputs_single_model does not fail when a smaller array is passed as first input, but output is broadcast to larger array. 
Issue #10170 """ model = models.Gaussian2D(1, 2, 3, 4, 5) output = model([1, 2], 3) assert output.shape == (2,) np.testing.assert_array_equal(output, [0.9692332344763441, 1.0]) output = model(4, [5, 6]) assert output.shape == (2,) np.testing.assert_array_equal(output, [0.8146473164114145, 0.7371233743916278]) def test_prepare_outputs_complex_reshape(): x = np.array( [ [1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15], ] ) y = np.array( [ [16, 17, 18, 19, 20], [21, 22, 23, 24, 25], [26, 27, 28, 29, 30], ] ) m = models.Identity(3) | models.Mapping((2, 1, 0)) m.bounding_box = ((0, 100), (0, 200), (0, 50)) mf = models.fix_inputs(m, {2: 22}) t = mf | models.Mapping((2, 1), n_inputs=3) output = mf(1, 2) assert output == (22, 2, 1) output = t(1, 2) assert output == (1, 2) output = t(x, y) assert len(output) == 2 np.testing.assert_array_equal(output[0], x) np.testing.assert_array_equal(output[1], y) m = models.Identity(3) | models.Mapping((0, 1, 2)) m.bounding_box = ((0, 100), (0, 200), (0, 50)) mf = models.fix_inputs(m, {2: 22}) t = mf | models.Mapping((0, 1), n_inputs=3) output = mf(1, 2) assert output == (1, 2, 22) output = t(1, 2) assert output == (1, 2) output = t(x, y) assert len(output) == 2 np.testing.assert_array_equal(output[0], x) np.testing.assert_array_equal(output[1], y) def test_prepare_outputs_single_entry_vector(): """ jwst and gwcs both require that single entry vectors produce single entry output vectors, not scalars. This tests for that behavior. """ model = models.Gaussian2D(1, 2, 3, 4, 5) output = model(np.array([1]), np.array([2])) assert output.shape == (1,) np.testing.assert_allclose(output, [0.9500411305585278]) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") @pytest.mark.filterwarnings("ignore: Using a non-tuple") def test_prepare_outputs_sparse_grid(): """ Test to show that #11060 has been solved. 
""" shape = (3, 3) data = np.arange(np.product(shape)).reshape(shape) * u.m / u.s points_unit = u.pix points = [np.arange(size) * points_unit for size in shape] kwargs = { "bounds_error": False, "fill_value": np.nan, "method": "nearest", } transform = models.Tabular2D(points, data, **kwargs) truth = ( np.array( [ [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0], ] ) * u.m / u.s ) points = np.meshgrid(np.arange(3), np.arange(3), indexing="ij", sparse=True) x = points[0] * u.pix y = points[1] * u.pix value = transform(x, y) assert (value == truth).all() points = ( np.meshgrid(np.arange(3), np.arange(3), indexing="ij", sparse=False) * u.pix ) value = transform(*points) assert (value == truth).all() def test_coerce_units(): model = models.Polynomial1D(1, c0=1, c1=2) MESSAGE = r"Can only apply 'add' function to dimensionless quantities when other .*" with pytest.raises(u.UnitsError, match=MESSAGE): model(u.Quantity(10, u.m)) with_input_units = model.coerce_units({"x": u.m}) result = with_input_units(u.Quantity(10, u.m)) assert np.isclose(result, 21.0) with_input_units_tuple = model.coerce_units((u.m,)) result = with_input_units_tuple(u.Quantity(10, u.m)) assert np.isclose(result, 21.0) with_return_units = model.coerce_units(return_units={"y": u.s}) result = with_return_units(10) assert np.isclose(result.value, 21.0) assert result.unit == u.s with_return_units_tuple = model.coerce_units(return_units=(u.s,)) result = with_return_units_tuple(10) assert np.isclose(result.value, 21.0) assert result.unit == u.s with_both = model.coerce_units({"x": u.m}, {"y": u.s}) result = with_both(u.Quantity(10, u.m)) assert np.isclose(result.value, 21.0) assert result.unit == u.s with pytest.raises( ValueError, match=r"input_units keys.*do not match model inputs" ): model.coerce_units({"q": u.m}) with pytest.raises(ValueError, match=r"input_units length does not match n_inputs"): model.coerce_units((u.m, u.s)) model_with_existing_input_units = models.BlackBody() with pytest.raises( ValueError, match=r"Cannot specify input_units for model with existing input units", ): model_with_existing_input_units.coerce_units({"x": u.m}) with pytest.raises( ValueError, match=r"return_units keys.*do not match model outputs" ): model.coerce_units(return_units={"q": u.m}) with pytest.raises( ValueError, match=r"return_units length does not match n_outputs" ): model.coerce_units(return_units=(u.m, u.s)) def test_bounding_box_general_inverse(): model = NonFittableModel(42.5) MESSAGE = r"No bounding box is defined for this model" with pytest.raises(NotImplementedError, match=MESSAGE): model.bounding_box model.bounding_box = () assert model.bounding_box.bounding_box() == () model.inverse = NonFittableModel(3.14) inverse_model = model.inverse with pytest.raises(NotImplementedError, match=MESSAGE): inverse_model.bounding_box def test__add_special_operator(): sop_name = "name" sop = "value" key = _add_special_operator(sop_name, "value") assert key[0] == sop_name assert key[1] == SPECIAL_OPERATORS._unique_id assert key in SPECIAL_OPERATORS assert SPECIAL_OPERATORS[key] == sop def test_print_special_operator_CompoundModel(capsys): """ Test that issue #11310 has been fixed """ model = convolve_models(models.Sersic2D(), models.Gaussian2D()) with astropy.conf.set_temp("max_width", 80): # fmt: off assert str(model) == ( "Model: CompoundModel\n" "Inputs: ('x', 'y')\n" "Outputs: ('z',)\n" "Model set size: 1\n" "Expression: convolve_fft (([0]), ([1]))\n" "Components: \n" " [0]: <Sersic2D(amplitude=1., r_eff=1., n=4., " "x_0=0., y_0=0., 
ellip=0., theta=0.)>\n" "\n" " [1]: <Gaussian2D(amplitude=1., x_mean=0., y_mean=0., " "x_stddev=1., y_stddev=1., theta=0.)>\n" "Parameters:\n" " amplitude_0 r_eff_0 n_0 x_0_0 y_0_0 ... y_mean_1 x_stddev_1 y_stddev_1 theta_1\n" " ----------- ------- --- ----- ----- ... -------- ---------- ---------- -------\n" " 1.0 1.0 4.0 0.0 0.0 ... 0.0 1.0 1.0 0.0" ) # fmt: on def test__validate_input_shape(): model = models.Gaussian1D() model._n_models = 2 _input = np.array( [ [1, 2, 3], [4, 5, 6], ] ) # Successful validation assert model._validate_input_shape(_input, 0, model.inputs, 1, False) == (2, 3) # Fail number of axes MESSAGE = r"For model_set_axis=2, all inputs must be at least 3-dimensional" with pytest.raises(ValueError, match=MESSAGE): model._validate_input_shape(_input, 0, model.inputs, 2, True) # Fail number of models (has argname) MESSAGE = r"Input argument '.*' does not have the correct dimensions in .*" with pytest.raises(ValueError, match=MESSAGE): model._validate_input_shape(_input, 0, model.inputs, 1, True) # Fail number of models (no argname) with pytest.raises(ValueError, match=MESSAGE): model._validate_input_shape(_input, 0, [], 1, True) def test__validate_input_shapes(): model = models.Gaussian1D() model._n_models = 2 inputs = [mk.MagicMock() for _ in range(3)] argnames = mk.MagicMock() model_set_axis = mk.MagicMock() all_shapes = [mk.MagicMock() for _ in inputs] # Successful validation with mk.patch.object( Model, "_validate_input_shape", autospec=True, side_effect=all_shapes ) as mkValidate: with mk.patch.object(core, "check_broadcast", autospec=True) as mkCheck: assert mkCheck.return_value == model._validate_input_shapes( inputs, argnames, model_set_axis ) assert mkCheck.call_args_list == [mk.call(*all_shapes)] assert mkValidate.call_args_list == [ mk.call(model, _input, idx, argnames, model_set_axis, True) for idx, _input in enumerate(inputs) ] # Fail check_broadcast MESSAGE = r"All inputs must have identical shapes or must be scalars" with mk.patch.object( Model, "_validate_input_shape", autospec=True, side_effect=all_shapes ) as mkValidate: with mk.patch.object( core, "check_broadcast", autospec=True, return_value=None ) as mkCheck: with pytest.raises(ValueError, match=MESSAGE): model._validate_input_shapes(inputs, argnames, model_set_axis) assert mkCheck.call_args_list == [mk.call(*all_shapes)] assert mkValidate.call_args_list == [ mk.call(model, _input, idx, argnames, model_set_axis, True) for idx, _input in enumerate(inputs) ] def test__remove_axes_from_shape(): model = models.Gaussian1D() # len(shape) == 0 assert model._remove_axes_from_shape((), mk.MagicMock()) == () # axis < 0 assert model._remove_axes_from_shape((1, 2, 3), -1) == (1, 2) assert model._remove_axes_from_shape((1, 2, 3), -2) == (1, 3) assert model._remove_axes_from_shape((1, 2, 3), -3) == (2, 3) # axis >= len(shape) assert model._remove_axes_from_shape((1, 2, 3), 3) == () assert model._remove_axes_from_shape((1, 2, 3), 4) == () # 0 <= axis < len(shape) assert model._remove_axes_from_shape((1, 2, 3), 0) == (2, 3) assert model._remove_axes_from_shape((1, 2, 3), 1) == (3,) assert model._remove_axes_from_shape((1, 2, 3), 2) == () def test_get_bounding_box(): model = models.Const2D(2) # No with_bbox assert model.get_bounding_box(False) is None # No bounding_box MESSAGE = r"No bounding box is defined for this model" with pytest.raises(NotImplementedError, match=MESSAGE): model.bounding_box assert model.get_bounding_box(True) is None # Normal bounding_box model.bounding_box = ((0, 1), (0, 1)) assert not 
isinstance(model.bounding_box, CompoundBoundingBox) assert model.get_bounding_box(True) == ((0, 1), (0, 1)) # CompoundBoundingBox with no removal bbox = CompoundBoundingBox.validate( model, {(1,): ((-1, 0), (-1, 0)), (2,): ((0, 1), (0, 1))}, selector_args=[("y", False)], ) model.bounding_box = bbox assert isinstance(model.bounding_box, CompoundBoundingBox) # Get using argument not with_bbox assert model.get_bounding_box(True) == bbox # Get using with_bbox not argument assert model.get_bounding_box((1,)) == ((-1, 0), (-1, 0)) assert model.get_bounding_box((2,)) == ((0, 1), (0, 1)) def test_compound_bounding_box(): model = models.Gaussian1D() truth = models.Gaussian1D() bbox1 = CompoundBoundingBox.validate( model, {(1,): (-1, 0), (2,): (0, 1)}, selector_args=[("x", False)] ) bbox2 = CompoundBoundingBox.validate( model, {(-0.5,): (-1, 0), (0.5,): (0, 1)}, selector_args=[("x", False)] ) # Using with_bounding_box to pass a selector model.bounding_box = bbox1 assert model(-0.5) == truth(-0.5) assert model(-0.5, with_bounding_box=(1,)) == truth(-0.5) assert np.isnan(model(-0.5, with_bounding_box=(2,))) assert model(0.5) == truth(0.5) assert model(0.5, with_bounding_box=(2,)) == truth(0.5) assert np.isnan(model(0.5, with_bounding_box=(1,))) # Using argument value to pass bounding_box model.bounding_box = bbox2 assert model(-0.5) == truth(-0.5) assert model(-0.5, with_bounding_box=True) == truth(-0.5) assert model(0.5) == truth(0.5) assert model(0.5, with_bounding_box=True) == truth(0.5) MESSAGE = r"No bounding box is defined for selector: .*" with pytest.raises(RuntimeError, match=MESSAGE): model(0, with_bounding_box=True) model1 = models.Gaussian1D() truth1 = models.Gaussian1D() model2 = models.Const1D(2) truth2 = models.Const1D(2) model = model1 + model2 truth = truth1 + truth2 assert isinstance(model, CompoundModel) model.bounding_box = bbox1 assert model(-0.5) == truth(-0.5) assert model(-0.5, with_bounding_box=1) == truth(-0.5) assert np.isnan(model(-0.5, with_bounding_box=(2,))) assert model(0.5) == truth(0.5) assert model(0.5, with_bounding_box=2) == truth(0.5) assert np.isnan(model(0.5, with_bounding_box=(1,))) model.bounding_box = bbox2 assert model(-0.5) == truth(-0.5) assert model(-0.5, with_bounding_box=True) == truth(-0.5) assert model(0.5) == truth(0.5) assert model(0.5, with_bounding_box=True) == truth(0.5) with pytest.raises(RuntimeError, match=MESSAGE): model(0, with_bounding_box=True) def test_bind_bounding_box(): model = models.Polynomial2D(3) bbox = ((-1, 1), (-2, 2)) bind_bounding_box(model, bbox) assert model.get_bounding_box() is not None assert model.bounding_box == bbox assert model.bounding_box["x"] == (-2, 2) assert model.bounding_box["y"] == (-1, 1) bind_bounding_box(model, bbox, order="F") assert model.get_bounding_box() is not None assert model.bounding_box == bbox assert model.bounding_box["x"] == (-1, 1) assert model.bounding_box["y"] == (-2, 2) def test_bind_compound_bounding_box_using_with_bounding_box_select(): """ This demonstrates how to bind multiple bounding_boxes which are selectable using the `with_bounding_box`, note there must be a fall-back to implicit. 
""" model = models.Gaussian1D() truth = models.Gaussian1D() bbox = (0, 1) MESSAGE = r"'tuple' object has no attribute 'items" with pytest.raises(AttributeError, match=MESSAGE): bind_compound_bounding_box(model, bbox, "x") bbox = {0: (-1, 0), 1: (0, 1)} bind_compound_bounding_box(model, bbox, [("x", False)]) # No bounding box assert model(-0.5) == truth(-0.5) assert model(0.5) == truth(0.5) assert model(0) == truth(0) assert model(1) == truth(1) # `with_bounding_box` selects as `-0.5` will not be a key assert model(-0.5, with_bounding_box=0) == truth(-0.5) assert np.isnan(model(-0.5, with_bounding_box=1)) # `with_bounding_box` selects as `0.5` will not be a key assert model(0.5, with_bounding_box=1) == truth(0.5) assert np.isnan(model(0.5, with_bounding_box=(0,))) # Fall back onto implicit selector assert model(0, with_bounding_box=True) == truth(0) assert model(1, with_bounding_box=True) == truth(1) # Attempt to fall-back on implicit selector, but no bounding_box MESSAGE = r"No bounding box is defined for selector: .*" with pytest.raises(RuntimeError, match=MESSAGE): model(0.5, with_bounding_box=True) # Override implicit selector assert np.isnan(model(1, with_bounding_box=0)) def test_fix_inputs_compound_bounding_box(): base_model = models.Gaussian2D(1, 2, 3, 4, 5) bbox = {2.5: (-1, 1), 3.14: (-7, 3)} model = fix_inputs(base_model, {"y": 2.5}, bounding_boxes=bbox) assert model.bounding_box == (-1, 1) model = fix_inputs(base_model, {"x": 2.5}, bounding_boxes=bbox) assert model.bounding_box == (-1, 1) model = fix_inputs( base_model, {"y": 2.5}, bounding_boxes=bbox, selector_args=(("y", True),) ) assert model.bounding_box == (-1, 1) model = fix_inputs( base_model, {"x": 2.5}, bounding_boxes=bbox, selector_args=(("x", True),) ) assert model.bounding_box == (-1, 1) model = fix_inputs( base_model, {"x": 2.5}, bounding_boxes=bbox, selector_args=((0, True),) ) assert model.bounding_box == (-1, 1) base_model = models.Identity(4) bbox = {(2.5, 1.3): ((-1, 1), (-3, 3)), (2.5, 2.71): ((-3, 3), (-1, 1))} model = fix_inputs(base_model, {"x0": 2.5, "x1": 1.3}, bounding_boxes=bbox) assert model.bounding_box == ((-1, 1), (-3, 3)) model = fix_inputs( base_model, {"x0": 2.5, "x1": 1.3}, bounding_boxes=bbox, selector_args=(("x0", True), ("x1", True)), ) assert model.bounding_box == ((-1, 1), (-3, 3)) model = fix_inputs( base_model, {"x0": 2.5, "x1": 1.3}, bounding_boxes=bbox, selector_args=((0, True), (1, True)), ) assert model.bounding_box == ((-1, 1), (-3, 3)) def test_model_copy_with_bounding_box(): model = models.Polynomial2D(2) bbox = ModelBoundingBox.validate(model, ((-0.5, 1047.5), (-0.5, 2047.5)), order="F") # No bbox model_copy = model.copy() assert id(model_copy) != id(model) assert model_copy.get_bounding_box() is None assert model.get_bounding_box() is None # with bbox model.bounding_box = bbox model_copy = model.copy() assert id(model_copy) != id(model) assert id(model_copy.bounding_box) != id(model.bounding_box) for index, interval in model.bounding_box.intervals.items(): interval_copy = model_copy.bounding_box.intervals[index] assert interval == interval_copy assert id(interval) != interval_copy # add model to compound model model1 = model | models.Identity(1) model_copy = model1.copy() assert id(model_copy) != id(model1) assert model_copy.get_bounding_box() is None assert model1.get_bounding_box() is None def test_compound_model_copy_with_bounding_box(): model = models.Shift(1) & models.Shift(2) & models.Identity(1) model.inputs = ("x", "y", "slit_id") bbox = ModelBoundingBox.validate( model, 
((-0.5, 1047.5), (-0.5, 2047.5), (-np.inf, np.inf)), order="F" ) # No bbox model_copy = model.copy() assert id(model_copy) != id(model) assert model_copy.get_bounding_box() is None assert model.get_bounding_box() is None # with bbox model.bounding_box = bbox model_copy = model.copy() assert id(model_copy) != id(model) assert id(model_copy.bounding_box) != id(model.bounding_box) for index, interval in model.bounding_box.intervals.items(): interval_copy = model_copy.bounding_box.intervals[index] assert interval == interval_copy assert id(interval) != interval_copy # add model to compound model model1 = model | models.Identity(3) model_copy = model1.copy() assert id(model_copy) != id(model1) assert model_copy.get_bounding_box() is None assert model1.get_bounding_box() is None def test_model_copy_with_compound_bounding_box(): model = models.Polynomial2D(2) bbox = {(0,): (-0.5, 1047.5), (1,): (-0.5, 3047.5)} cbbox = CompoundBoundingBox.validate( model, bbox, selector_args=[("x", True)], order="F" ) # No cbbox model_copy = model.copy() assert id(model_copy) != id(model) assert model_copy.get_bounding_box() is None assert model.get_bounding_box() is None # with cbbox model.bounding_box = cbbox model_copy = model.copy() assert id(model_copy) != id(model) assert id(model_copy.bounding_box) != id(model.bounding_box) assert model_copy.bounding_box.selector_args == model.bounding_box.selector_args assert id(model_copy.bounding_box.selector_args) != id( model.bounding_box.selector_args ) for selector, bbox in model.bounding_box.bounding_boxes.items(): for index, interval in bbox.intervals.items(): interval_copy = model_copy.bounding_box.bounding_boxes[selector].intervals[ index ] assert interval == interval_copy assert id(interval) != interval_copy # add model to compound model model1 = model | models.Identity(1) model_copy = model1.copy() assert id(model_copy) != id(model1) assert model_copy.get_bounding_box() is None assert model1.get_bounding_box() is None def test_compound_model_copy_with_compound_bounding_box(): model = models.Shift(1) & models.Shift(2) & models.Identity(1) model.inputs = ("x", "y", "slit_id") bbox = { (0,): ((-0.5, 1047.5), (-0.5, 2047.5)), (1,): ((-0.5, 3047.5), (-0.5, 4047.5)), } cbbox = CompoundBoundingBox.validate( model, bbox, selector_args=[("slit_id", True)], order="F" ) # No cbbox model_copy = model.copy() assert id(model_copy) != id(model) assert model_copy.get_bounding_box() is None assert model.get_bounding_box() is None # with cbbox model.bounding_box = cbbox model_copy = model.copy() assert id(model_copy) != id(model) assert id(model_copy.bounding_box) != id(model.bounding_box) assert model_copy.bounding_box.selector_args == model.bounding_box.selector_args assert id(model_copy.bounding_box.selector_args) != id( model.bounding_box.selector_args ) for selector, bbox in model.bounding_box.bounding_boxes.items(): for index, interval in bbox.intervals.items(): interval_copy = model_copy.bounding_box.bounding_boxes[selector].intervals[ index ] assert interval == interval_copy assert id(interval) != interval_copy # add model to compound model model1 = model | models.Identity(3) model_copy = model1.copy() assert id(model_copy) != id(model1) assert model_copy.get_bounding_box() is None assert model1.get_bounding_box() is None def test_compound_model_copy_user_attribute(): """Regression test for issue #12370""" model = models.Gaussian2D(100, 25, 25, 5, 5) | models.Identity(1) model.xname = "x_mean" # user-defined attribute assert hasattr(model, "xname") assert model.xname == 
"x_mean" model_copy = model.copy() model_copy.xname assert hasattr(model_copy, "xname") assert model_copy.xname == "x_mean" def test_model_mixed_array_scalar_bounding_box(): """Regression test for issue #12319""" model = models.Gaussian2D() bbox = ModelBoundingBox.validate(model, ((-1, 1), (-np.inf, np.inf)), order="F") model.bounding_box = bbox x = np.array([-0.5, 0.5]) y = 0 # Everything works when its all in the bounding box assert (model(x, y) == (model(x, y, with_bounding_box=True))).all() def test_compound_model_mixed_array_scalar_bounding_box(): """Regression test for issue #12319""" model = models.Shift(1) & models.Shift(2) & models.Identity(1) model.inputs = ("x", "y", "slit_id") bbox = ModelBoundingBox.validate( model, ((-0.5, 1047.5), (-0.5, 2047.5), (-np.inf, np.inf)), order="F" ) model.bounding_box = bbox x = np.array([1000, 1001]) y = np.array([2000, 2001]) slit_id = 0 # Everything works when its all in the bounding box value0 = model(x, y, slit_id) value1 = model(x, y, slit_id, with_bounding_box=True) assert_equal(value0, value1) def test_model_with_bounding_box_true_and_single_output(): """Regression test for issue #12373""" model = models.Mapping((1,)) x = [1, 2] y = [3, 4] # Check baseline assert_equal(model(x, y), [3, 4]) # Check with_bounding_box=True should be the same assert_equal(model(x, y, with_bounding_box=True), [3, 4]) model.bounding_box = ((-np.inf, np.inf), (-np.inf, np.inf)) # Check baseline assert_equal(model(x, y), [3, 4]) # Check with_bounding_box=True should be the same assert_equal(model(x, y, with_bounding_box=True), [3, 4]) def test_compound_model_with_bounding_box_true_and_single_output(): """Regression test for issue #12373""" model = models.Mapping((1,)) | models.Shift(1) x = [1, 2] y = [3, 4] # Check baseline assert_equal(model(x, y), [4, 5]) # Check with_bounding_box=True should be the same assert_equal(model(x, y, with_bounding_box=True), [4, 5]) model.bounding_box = ((-np.inf, np.inf), (-np.inf, np.inf)) # Check baseline assert_equal(model(x, y), [4, 5]) # Check with_bounding_box=True should be the same assert_equal(model(x, y, with_bounding_box=True), [4, 5]) def test_bounding_box_pass_with_ignored(): """Test the possiblity of setting ignored variables in bounding box""" model = models.Polynomial2D(2) bbox = ModelBoundingBox.validate(model, (-1, 1), ignored=["y"]) model.bounding_box = bbox assert model.bounding_box.bounding_box() == (-1, 1) assert model.bounding_box == bbox model = models.Polynomial2D(2) bind_bounding_box(model, (-1, 1), ignored=["y"]) assert model.bounding_box.bounding_box() == (-1, 1) assert model.bounding_box == bbox def test_compound_bounding_box_pass_with_ignored(): model = models.Shift(1) & models.Shift(2) & models.Identity(1) model.inputs = ("x", "y", "slit_id") bbox = { (0,): (-0.5, 1047.5), (1,): (-0.5, 2047.5), } cbbox = CompoundBoundingBox.validate( model, bbox, selector_args=[("slit_id", True)], ignored=["y"], order="F" ) model.bounding_box = cbbox model = models.Shift(1) & models.Shift(2) & models.Identity(1) model.inputs = ("x", "y", "slit_id") bind_compound_bounding_box( model, bbox, selector_args=[("slit_id", True)], ignored=["y"], order="F" ) assert model.bounding_box == cbbox @pytest.mark.parametrize("int_type", [int, np.int32, np.int64, np.uint32, np.uint64]) def test_model_integer_indexing(int_type): """Regression for PR 12561; verify that compound model components can be accessed by integer index""" gauss = models.Gaussian2D() airy = models.AiryDisk2D() compound = gauss + airy assert compound[int_type(0)] 
== gauss assert compound[int_type(1)] == airy def test_model_string_indexing(): """Regression for PR 12561; verify that compound model components can be accessed by indexing with model name""" gauss = models.Gaussian2D() gauss.name = "Model1" airy = models.AiryDisk2D() airy.name = "Model2" compound = gauss + airy assert compound["Model1"] == gauss assert compound["Model2"] == airy
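# A compact usage sketch combining the two indexing behaviors verified above:
# submodels of a compound model can be retrieved by position or, when named,
# by name. Purely illustrative; the names here are local choices.
def example_compound_indexing_sketch():
    gauss = models.Gaussian2D(name="gauss")
    airy = models.AiryDisk2D(name="airy")
    compound = gauss + airy
    assert compound[0] == gauss
    assert compound["gauss"] == gauss
    assert compound[1] == airy
    assert compound["airy"] == airy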
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Tests for spline models and fitters""" import unittest.mock as mk import numpy as np import pytest from numpy.testing import assert_allclose from astropy.modeling.core import FittableModel, ModelDefinitionError from astropy.modeling.fitting import ( SplineExactKnotsFitter, SplineInterpolateFitter, SplineSmoothingFitter, SplineSplrepFitter, ) from astropy.modeling.parameters import Parameter from astropy.modeling.spline import Spline1D, _Spline, _SplineFitter from astropy.utils.compat.optional_deps import HAS_SCIPY from astropy.utils.exceptions import AstropyUserWarning npts = 50 nknots = 10 np.random.seed(42) test_w = np.random.rand(npts) test_t = [-1, 0, 1] noise = np.random.randn(npts) degree_tests = [1, 2, 3, 4, 5] wieght_tests = [None, test_w] smoothing_tests = [None, 0.01] class TestSpline: def setup_class(self): self.num_opt = 3 self.optional_inputs = {f"test{i}": mk.MagicMock() for i in range(self.num_opt)} self.extra_kwargs = {f"new{i}": mk.MagicMock() for i in range(self.num_opt)} class Spline(_Spline): optional_inputs = {"test": "test"} def _init_parameters(self): super()._init_parameters() def _init_data(self, knots, coeffs, bounds=None): super()._init_data(knots, coeffs, bounds=bounds) self.Spline = Spline def test___init__(self): # empty spline spl = self.Spline() assert spl._t is None assert spl._c is None assert spl._user_knots is False assert spl._degree is None assert spl._test is None assert not hasattr(spl, "degree") # Call _init_spline with mk.patch.object(_Spline, "_init_spline", autospec=True) as mkInit: # No call (knots=None) spl = self.Spline() assert mkInit.call_args_list == [] knots = mk.MagicMock() coeffs = mk.MagicMock() bounds = mk.MagicMock() spl = self.Spline(knots=knots, coeffs=coeffs, bounds=bounds) assert mkInit.call_args_list == [mk.call(spl, knots, coeffs, bounds)] assert spl._t is None assert spl._c is None assert spl._user_knots is False assert spl._degree is None assert spl._test is None # Coeffs but no knots MESSAGE = r"If one passes a coeffs vector one needs to also pass knots!" with pytest.raises(ValueError, match=MESSAGE): self.Spline(coeffs=mk.MagicMock()) def test_param_names(self): # no parameters spl = self.Spline() assert spl.param_names == () knot_names = tuple(mk.MagicMock() for _ in range(3)) spl._knot_names = knot_names assert spl.param_names == knot_names coeff_names = tuple(mk.MagicMock() for _ in range(3)) spl._coeff_names = coeff_names assert spl.param_names == knot_names + coeff_names def test__optional_arg(self): spl = self.Spline() assert spl._optional_arg("test") == "_test" def test__create_optional_inputs(self): class Spline(self.Spline): optional_inputs = self.optional_inputs def __init__(self): self._create_optional_inputs() spl = Spline() for arg in self.optional_inputs: attribute = spl._optional_arg(arg) assert hasattr(spl, attribute) assert getattr(spl, attribute) is None with pytest.raises( ValueError, match=r"Optional argument .* already exists in this class!" 
): spl._create_optional_inputs() def test__intercept_optional_inputs(self): class Spline(self.Spline): optional_inputs = self.optional_inputs def __init__(self): self._create_optional_inputs() spl = Spline() new_kwargs = spl._intercept_optional_inputs(**self.extra_kwargs) for arg, value in self.optional_inputs.items(): attribute = spl._optional_arg(arg) assert getattr(spl, attribute) is None assert new_kwargs == self.extra_kwargs kwargs = self.extra_kwargs.copy() for arg in self.optional_inputs: kwargs[arg] = mk.MagicMock() new_kwargs = spl._intercept_optional_inputs(**kwargs) for arg, value in self.optional_inputs.items(): attribute = spl._optional_arg(arg) assert getattr(spl, attribute) is not None assert getattr(spl, attribute) == kwargs[arg] assert getattr(spl, attribute) != value assert arg not in new_kwargs assert new_kwargs == self.extra_kwargs assert kwargs != self.extra_kwargs with pytest.raises( RuntimeError, match=r".* has already been set, something has gone wrong!" ): spl._intercept_optional_inputs(**kwargs) def test_evaluate(self): class Spline(self.Spline): optional_inputs = self.optional_inputs spl = Spline() # No options passed in and No options set new_kwargs = spl.evaluate(**self.extra_kwargs) for arg, value in self.optional_inputs.items(): assert new_kwargs[arg] == value for arg, value in self.extra_kwargs.items(): assert new_kwargs[arg] == value assert len(new_kwargs) == (len(self.optional_inputs) + len(self.extra_kwargs)) # No options passed in and Options set kwargs = self.extra_kwargs.copy() for arg in self.optional_inputs: kwargs[arg] = mk.MagicMock() spl._intercept_optional_inputs(**kwargs) new_kwargs = spl.evaluate(**self.extra_kwargs) assert new_kwargs == kwargs for arg in self.optional_inputs: attribute = spl._optional_arg(arg) assert getattr(spl, attribute) is None # Options passed in set_kwargs = self.extra_kwargs.copy() for arg in self.optional_inputs: kwargs[arg] = mk.MagicMock() spl._intercept_optional_inputs(**set_kwargs) kwargs = self.extra_kwargs.copy() for arg in self.optional_inputs: kwargs[arg] = mk.MagicMock() assert set_kwargs != kwargs new_kwargs = spl.evaluate(**kwargs) assert new_kwargs == kwargs def test___call__(self): spl = self.Spline() args = tuple(mk.MagicMock() for _ in range(3)) kwargs = {f"test{idx}": mk.MagicMock() for idx in range(3)} new_kwargs = {f"new_test{idx}": mk.MagicMock() for idx in range(3)} with mk.patch.object( _Spline, "_intercept_optional_inputs", autospec=True, return_value=new_kwargs, ) as mkIntercept: with mk.patch.object(FittableModel, "__call__", autospec=True) as mkCall: assert mkCall.return_value == spl(*args, **kwargs) assert mkCall.call_args_list == [mk.call(spl, *args, **new_kwargs)] assert mkIntercept.call_args_list == [mk.call(spl, **kwargs)] def test__create_parameter(self): np.random.seed(37) base_vec = np.random.random(20) test = base_vec.copy() fixed_test = base_vec.copy() class Spline(self.Spline): @property def test(self): return test @property def fixed_test(self): return fixed_test spl = Spline() assert (spl.test == test).all() assert (spl.fixed_test == fixed_test).all() for index in range(20): name = f"test_name{index}" spl._create_parameter(name, index, "test") assert hasattr(spl, name) param = getattr(spl, name) assert isinstance(param, Parameter) assert param.model == spl assert param.fixed is False assert param.value == test[index] == spl.test[index] == base_vec[index] new_set = np.random.random() param.value = new_set assert spl.test[index] == new_set assert spl.test[index] != base_vec[index] 
    def test__create_parameter(self):
        np.random.seed(37)
        base_vec = np.random.random(20)
        test = base_vec.copy()
        fixed_test = base_vec.copy()

        class Spline(self.Spline):
            @property
            def test(self):
                return test

            @property
            def fixed_test(self):
                return fixed_test

        spl = Spline()
        assert (spl.test == test).all()
        assert (spl.fixed_test == fixed_test).all()

        for index in range(20):
            name = f"test_name{index}"
            spl._create_parameter(name, index, "test")
            assert hasattr(spl, name)
            param = getattr(spl, name)
            assert isinstance(param, Parameter)
            assert param.model == spl
            assert param.fixed is False
            assert param.value == test[index] == spl.test[index] == base_vec[index]
            new_set = np.random.random()
            param.value = new_set
            assert spl.test[index] == new_set
            assert spl.test[index] != base_vec[index]
            new_get = np.random.random()
            spl.test[index] = new_get
            assert param.value == new_get
            assert param.value != new_set

        for index in range(20):
            name = f"fixed_test_name{index}"
            spl._create_parameter(name, index, "fixed_test", True)
            assert hasattr(spl, name)
            param = getattr(spl, name)
            assert isinstance(param, Parameter)
            assert param.model == spl
            assert param.fixed is True
            assert (
                param.value
                == fixed_test[index]
                == spl.fixed_test[index]
                == base_vec[index]
            )
            new_set = np.random.random()
            param.value = new_set
            assert spl.fixed_test[index] == new_set
            assert spl.fixed_test[index] != base_vec[index]
            new_get = np.random.random()
            spl.fixed_test[index] = new_get
            assert param.value == new_get
            assert param.value != new_set

    def test__create_parameters(self):
        np.random.seed(37)
        test = np.random.random(20)

        class Spline(self.Spline):
            @property
            def test(self):
                return test

        spl = Spline()

        fixed = mk.MagicMock()
        with mk.patch.object(_Spline, "_create_parameter", autospec=True) as mkCreate:
            params = spl._create_parameters("test_param", "test", fixed)
            assert params == tuple(f"test_param{idx}" for idx in range(20))
            assert mkCreate.call_args_list == [
                mk.call(spl, f"test_param{idx}", idx, "test", fixed)
                for idx in range(20)
            ]

    def test__init_parameters(self):
        spl = self.Spline()

        MESSAGE = r"This needs to be implemented"
        with pytest.raises(NotImplementedError, match=MESSAGE):
            spl._init_parameters()

    def test__init_data(self):
        spl = self.Spline()

        MESSAGE = r"This needs to be implemented"
        with pytest.raises(NotImplementedError, match=MESSAGE):
            spl._init_data(mk.MagicMock(), mk.MagicMock(), mk.MagicMock())
        with pytest.raises(NotImplementedError, match=MESSAGE):
            spl._init_data(mk.MagicMock(), mk.MagicMock())

    def test__init_spline(self):
        spl = self.Spline()

        knots = mk.MagicMock()
        coeffs = mk.MagicMock()
        bounds = mk.MagicMock()
        with mk.patch.object(
            _Spline, "_init_parameters", autospec=True
        ) as mkParameters:
            with mk.patch.object(_Spline, "_init_data", autospec=True) as mkData:
                main = mk.MagicMock()
                main.attach_mock(mkParameters, "parameters")
                main.attach_mock(mkData, "data")

                spl._init_spline(knots, coeffs, bounds)
                assert main.mock_calls == [
                    mk.call.data(spl, knots, coeffs, bounds=bounds),
                    mk.call.parameters(spl),
                ]

    def test__init_tck(self):
        spl = self.Spline()
        assert spl._c is None
        assert spl._t is None
        assert spl._degree is None

        spl = self.Spline(degree=4)
        assert spl._c is None
        assert spl._t is None
        assert spl._degree == 4
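
# The TestSpline1D cases below lean heavily on scipy's "tck" convention: a spline
# is the triple (t, c, k) of padded knot vector, coefficients, and degree, with
# k + 1 repeated knots at each boundary. A minimal illustrative sketch of the
# round-trip the tests rely on (not collected by pytest; assumes scipy is
# installed):
def _example_tck_roundtrip():
    from scipy.interpolate import BSpline

    t = np.array([0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 5, 5], dtype=float)  # 4 repeats per end for k=3
    c = np.linspace(-1, 1, len(t))
    k = 3
    spl = Spline1D()
    spl.bspline = BSpline(t, c, k)  # assigning a BSpline populates t, c and the parameters
    return spl.tck  # recovers (t, c, k); here spl.tck[2] == 3
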

@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
class TestSpline1D:
    def setup_class(self):
        def func(x, noise=0):
            return np.exp(-(x**2)) + 0.1 * noise

        self.x = np.linspace(-3, 3, npts)
        self.y = func(self.x, noise)
        self.truth = func(self.x)

        arg_sort = np.argsort(self.x)
        np.random.shuffle(arg_sort)

        self.x_s = self.x[arg_sort]
        self.y_s = func(self.x_s, noise[arg_sort])

        self.npts_out = 1000
        self.xs = np.linspace(-3, 3, self.npts_out)

        self.t = np.linspace(-3, 3, nknots)[1:-1]

    def check_parameter(self, spl, base_name, name, index, value, fixed):
        assert base_name in name
        assert index == int(name.split(base_name)[-1])
        knot_name = f"{base_name}{index}"
        assert knot_name == name
        assert hasattr(spl, name)
        param = getattr(spl, name)
        assert isinstance(param, Parameter)
        assert param.name == name
        assert param.value == value(index)
        assert param.model == spl
        assert param.fixed is fixed

    def check_parameters(self, spl, params, base_name, value, fixed):
        for idx, name in enumerate(params):
            self.check_parameter(spl, base_name, name, idx, value, fixed)

    def update_parameters(self, spl, knots, value):
        for name in knots:
            param = getattr(spl, name)
            param.value = value
            assert param.value == value

    def test___init__with_no_knot_information(self):
        spl = Spline1D()
        assert spl._degree == 3
        assert spl._user_knots is False
        assert spl._t is None
        assert spl._c is None
        assert spl._nu is None

        # Check no parameters created
        assert len(spl._knot_names) == 0
        assert len(spl._coeff_names) == 0

    def test___init__with_number_of_knots(self):
        spl = Spline1D(knots=10)

        # Check baseline data
        assert spl._degree == 3
        assert spl._user_knots is False
        assert spl._nu is None

        # Check vector data
        assert len(spl._t) == 18
        t = np.zeros(18)
        t[-4:] = 1
        assert (spl._t == t).all()
        assert len(spl._c) == 18
        assert (spl._c == np.zeros(18)).all()

        # Check all parameter names created:
        assert len(spl._knot_names) == 18
        assert len(spl._coeff_names) == 18

        # Check knot values:
        def value0(idx):
            if idx < 18 - 4:
                return 0
            else:
                return 1

        self.check_parameters(spl, spl._knot_names, "knot", value0, True)

        # Check coeff values:
        def value1(idx):
            return 0

        self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)

    def test___init__with_full_custom_knots(self):
        t = 17 * np.arange(20) - 32
        spl = Spline1D(knots=t)

        # Check baseline data
        assert spl._degree == 3
        assert spl._user_knots is True
        assert spl._nu is None

        # Check vector data
        assert (spl._t == t).all()
        assert len(spl._c) == 20
        assert (spl._c == np.zeros(20)).all()

        # Check all parameter names created
        assert len(spl._knot_names) == 20
        assert len(spl._coeff_names) == 20

        # Check knot values:
        def value0(idx):
            return t[idx]

        self.check_parameters(spl, spl._knot_names, "knot", value0, True)

        # Check coeff values
        def value1(idx):
            return 0

        self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)

    def test___init__with_interior_custom_knots(self):
        t = np.arange(1, 20)
        spl = Spline1D(knots=t, bounds=[0, 20])

        # Check baseline data
        assert spl._degree == 3
        assert spl._user_knots is True
        assert spl._nu is None

        # Check vector data
        assert len(spl._t) == 27
        assert (spl._t[4:-4] == t).all()
        assert (spl._t[:4] == 0).all()
        assert (spl._t[-4:] == 20).all()

        assert len(spl._c) == 27
        assert (spl._c == np.zeros(27)).all()

        # Check knot values:
        def value0(idx):
            if idx < 4:
                return 0
            elif idx >= 19 + 4:
                return 20
            else:
                return t[idx - 4]

        self.check_parameters(spl, spl._knot_names, "knot", value0, True)

        # Check coeff values
        def value1(idx):
            return 0

        self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)

    def test___init__with_user_knots_and_coefficients(self):
        t = 17 * np.arange(20) - 32
        c = np.linspace(-1, 1, 20)
        spl = Spline1D(knots=t, coeffs=c)

        # Check baseline data
        assert spl._degree == 3
        assert spl._user_knots is True
        assert spl._nu is None

        # Check vector data
        assert (spl._t == t).all()
        assert len(spl._c) == 20
        assert (spl._c == c).all()

        # Check all parameter names created
        assert len(spl._knot_names) == 20
        assert len(spl._coeff_names) == 20

        # Check knot values:
        def value0(idx):
            return t[idx]

        self.check_parameters(spl, spl._knot_names, "knot", value0, True)

        # Check coeff values
        def value1(idx):
            return c[idx]

        self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)

    def test___init__errors(self):
        # Bad knot type
        knots = 3.5
        MESSAGE = f"Knots: {knots} must be iterable or value"
        with pytest.raises(ValueError, match=MESSAGE):
            Spline1D(knots=knots)

        # Not enough knots
        MESSAGE = r"Must have at least 8 knots"
        for idx in range(8):
            with pytest.raises(ValueError, match=MESSAGE):
                Spline1D(knots=np.arange(idx))

        # Bad scipy spline
        t = np.arange(20)[::-1]
        MESSAGE = r"Knots must be in a non-decreasing order"
        with pytest.raises(ValueError, match=MESSAGE):
            Spline1D(knots=t)
    def test_parameter_array_link(self):
        spl = Spline1D(10)

        # Check knot base values
        def value0(idx):
            if idx < 18 - 4:
                return 0
            else:
                return 1

        self.check_parameters(spl, spl._knot_names, "knot", value0, True)

        # Check knot vector -> knot parameter link
        t = np.arange(18)
        spl._t = t.copy()

        def value1(idx):
            return t[idx]

        self.check_parameters(spl, spl._knot_names, "knot", value1, True)

        # Check knot parameter -> knot vector link
        self.update_parameters(spl, spl._knot_names, 3)
        assert (spl._t[:] == 3).all()

        # Check coeff base values
        def value2(idx):
            return 0

        self.check_parameters(spl, spl._coeff_names, "coeff", value2, False)

        # Check coeff vector -> coeff parameter link
        c = 5 * np.arange(18) + 18
        spl._c = c.copy()

        def value3(idx):
            return c[idx]

        self.check_parameters(spl, spl._coeff_names, "coeff", value3, False)

        # Check coeff parameter -> coeff vector link
        self.update_parameters(spl, spl._coeff_names, 4)
        assert (spl._c[:] == 4).all()

    def test_two_splines(self):
        spl0 = Spline1D(knots=10)
        spl1 = Spline1D(knots=15, degree=2)

        assert spl0._degree == 3
        assert len(spl0._t) == 18
        t = np.zeros(18)
        t[-4:] = 1
        assert (spl0._t == t).all()
        assert len(spl0._c) == 18
        assert (spl0._c == np.zeros(18)).all()
        assert spl1._degree == 2
        assert len(spl1._t) == 21
        t = np.zeros(21)
        t[-3:] = 1
        assert (spl1._t == t).all()
        assert len(spl1._c) == 21
        assert (spl1._c == np.zeros(21)).all()

        # Check all knot names created
        assert len(spl0._knot_names) == 18
        assert len(spl1._knot_names) == 21

        # Check knot base values
        def value0(idx):
            if idx < 18 - 4:
                return 0
            else:
                return 1

        self.check_parameters(spl0, spl0._knot_names, "knot", value0, True)

        def value1(idx):
            if idx < 21 - 3:
                return 0
            else:
                return 1

        self.check_parameters(spl1, spl1._knot_names, "knot", value1, True)

        # Check knot vector -> knot parameter link
        t0 = 7 * np.arange(18) + 27
        t1 = 11 * np.arange(21) + 19
        spl0._t[:] = t0.copy()
        spl1._t[:] = t1.copy()

        def value2(idx):
            return t0[idx]

        self.check_parameters(spl0, spl0._knot_names, "knot", value2, True)

        def value3(idx):
            return t1[idx]

        self.check_parameters(spl1, spl1._knot_names, "knot", value3, True)

        # Check knot parameter -> knot vector link
        self.update_parameters(spl0, spl0._knot_names, 3)
        self.update_parameters(spl1, spl1._knot_names, 4)
        assert (spl0._t[:] == 3).all()
        assert (spl1._t[:] == 4).all()

        # Check all coeff names created
        assert len(spl0._coeff_names) == 18
        assert len(spl1._coeff_names) == 21

        # Check coeff base values
        def value4(idx):
            return 0

        self.check_parameters(spl0, spl0._coeff_names, "coeff", value4, False)
        self.check_parameters(spl1, spl1._coeff_names, "coeff", value4, False)

        # Check coeff vector -> coeff parameter link
        c0 = 17 * np.arange(18) + 14
        c1 = 37 * np.arange(21) + 47
        spl0._c[:] = c0.copy()
        spl1._c[:] = c1.copy()

        def value5(idx):
            return c0[idx]

        self.check_parameters(spl0, spl0._coeff_names, "coeff", value5, False)

        def value6(idx):
            return c1[idx]

        self.check_parameters(spl1, spl1._coeff_names, "coeff", value6, False)

        # Check coeff parameter -> coeff vector link
        self.update_parameters(spl0, spl0._coeff_names, 5)
        self.update_parameters(spl1, spl1._coeff_names, 6)
        assert (spl0._t[:] == 3).all()
        assert (spl1._t[:] == 4).all()
        assert (spl0._c[:] == 5).all()
        assert (spl1._c[:] == 6).all()

    def test__knot_names(self):
        # no parameters
        spl = Spline1D()
        assert spl._knot_names == ()

        # some parameters
        knot_names = [f"knot{idx}" for idx in range(18)]

        spl = Spline1D(10)
        assert spl._knot_names == tuple(knot_names)
    def test__coeff_names(self):
        # no parameters
        spl = Spline1D()
        assert spl._coeff_names == ()

        # some parameters
        coeff_names = [f"coeff{idx}" for idx in range(18)]

        spl = Spline1D(10)
        assert spl._coeff_names == tuple(coeff_names)

    def test_param_names(self):
        # no parameters
        spl = Spline1D()
        assert spl.param_names == ()

        # some parameters
        knot_names = [f"knot{idx}" for idx in range(18)]
        coeff_names = [f"coeff{idx}" for idx in range(18)]
        param_names = knot_names + coeff_names

        spl = Spline1D(10)
        assert spl.param_names == tuple(param_names)

    def test_t(self):
        # no parameters
        spl = Spline1D()
        # test get
        assert spl._t is None
        assert (spl.t == [0, 0, 0, 0, 1, 1, 1, 1]).all()
        # test set
        MESSAGE = r"The model parameters must be initialized before setting knots"
        with pytest.raises(ValueError, match=MESSAGE):
            spl.t = mk.MagicMock()

        # with parameters
        spl = Spline1D(10)
        # test get
        t = np.zeros(18)
        t[-4:] = 1
        assert (spl._t == t).all()
        assert (spl.t == t).all()
        # test set
        spl.t = np.arange(18) + 15
        assert (spl._t == (np.arange(18) + 15)).all()
        assert (spl.t == (np.arange(18) + 15)).all()
        assert (spl.t != t).all()
        # set error
        MESSAGE = r"There must be exactly as many knots as previously defined"
        for idx in range(30):
            if idx == 18:
                continue
            with pytest.raises(ValueError, match=MESSAGE):
                spl.t = np.arange(idx)

    def test_c(self):
        # no parameters
        spl = Spline1D()
        # test get
        assert spl._c is None
        assert (spl.c == [0, 0, 0, 0, 0, 0, 0, 0]).all()
        # test set
        MESSAGE = r"The model parameters must be initialized before setting coeffs"
        with pytest.raises(ValueError, match=MESSAGE):
            spl.c = mk.MagicMock()

        # with parameters
        spl = Spline1D(10)
        # test get
        assert (spl._c == np.zeros(18)).all()
        assert (spl.c == np.zeros(18)).all()
        # test set
        spl.c = np.arange(18) + 15
        assert (spl._c == (np.arange(18) + 15)).all()
        assert (spl.c == (np.arange(18) + 15)).all()
        assert (spl.c != np.zeros(18)).all()
        # set error
        MESSAGE = r"There must be exactly as many coeffs as previously defined"
        for idx in range(30):
            if idx == 18:
                continue
            with pytest.raises(ValueError, match=MESSAGE):
                spl.c = np.arange(idx)

    def test_degree(self):
        # default degree
        spl = Spline1D()
        # test get
        assert spl._degree == 3
        assert spl.degree == 3

        # non-default degree
        spl = Spline1D(degree=2)
        # test get
        assert spl._degree == 2
        assert spl.degree == 2

    def test__initialized(self):
        # no parameters
        spl = Spline1D()
        assert spl._initialized is False

        # with parameters
        spl = Spline1D(knots=10, degree=2)
        assert spl._initialized is True
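
    # A recurring bit of arithmetic in these tests: Spline1D(knots=N, degree=k)
    # pads the interior knots with k + 1 copies of each bound, so the full knot
    # vector has N + 2 * (k + 1) entries: 18 for (N=10, k=3) and 16 for
    # (N=10, k=2). A minimal illustrative sketch, not collected by pytest:
    def _example_knot_count(self):
        for k, expected in [(3, 18), (2, 16)]:
            spl = Spline1D(knots=10, degree=k)
            assert len(spl.t) == 10 + 2 * (k + 1) == expected
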
    def test_tck(self):
        # no parameters
        spl = Spline1D()
        # test get
        assert (spl.t == [0, 0, 0, 0, 1, 1, 1, 1]).all()
        assert (spl.c == [0, 0, 0, 0, 0, 0, 0, 0]).all()
        assert spl.degree == 3
        tck = spl.tck
        assert (tck[0] == spl.t).all()
        assert (tck[1] == spl.c).all()
        assert tck[2] == spl.degree
        # test set
        assert spl._t is None
        assert spl._c is None
        assert spl._knot_names == ()
        assert spl._coeff_names == ()
        t = np.array([0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 5, 5])
        np.random.seed(619)
        c = np.random.random(12)
        k = 3
        spl.tck = (t, c, k)
        assert (spl._t == t).all()
        assert (spl._c == c).all()
        assert spl.degree == k

        def value0(idx):
            return t[idx]

        self.check_parameters(spl, spl._knot_names, "knot", value0, True)

        def value1(idx):
            return c[idx]

        self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)

        # with parameters
        spl = Spline1D(knots=10, degree=2)
        # test get
        t = np.zeros(16)
        t[-3:] = 1
        assert (spl.t == t).all()
        assert (spl.c == np.zeros(16)).all()
        assert spl.degree == 2
        tck = spl.tck
        assert (tck[0] == spl.t).all()
        assert (tck[1] == spl.c).all()
        assert tck[2] == spl.degree
        # test set
        t = 5 * np.arange(16) + 11
        c = 7 * np.arange(16) + 13
        k = 2
        spl.tck = (t, c, k)
        assert (spl.t == t).all()
        assert (spl.c == c).all()
        assert spl.degree == k
        tck = spl.tck
        assert (tck[0] == spl.t).all()
        assert (tck[1] == spl.c).all()
        assert tck[2] == spl.degree

        # Error
        MESSAGE = r"tck has incompatible degree!"
        with pytest.raises(ValueError, match=MESSAGE):
            spl.tck = (t, c, 4)

    def test_bspline(self):
        from scipy.interpolate import BSpline

        # no parameters
        spl = Spline1D()
        bspline = spl.bspline

        assert isinstance(bspline, BSpline)
        assert (bspline.tck[0] == spl.tck[0]).all()
        assert (bspline.tck[1] == spl.tck[1]).all()
        assert bspline.tck[2] == spl.tck[2]

        t = np.array([0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 5, 5])
        np.random.seed(619)
        c = np.random.random(12)
        k = 3

        def value0(idx):
            return t[idx]

        def value1(idx):
            return c[idx]

        # set (bspline)
        spl = Spline1D()
        assert spl._t is None
        assert spl._c is None
        assert spl._knot_names == ()
        assert spl._coeff_names == ()
        bspline = BSpline(t, c, k)
        spl.bspline = bspline
        assert (spl._t == t).all()
        assert (spl._c == c).all()
        assert spl.degree == k
        self.check_parameters(spl, spl._knot_names, "knot", value0, True)
        self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)

        # set (tuple spline)
        spl = Spline1D()
        assert spl._t is None
        assert spl._c is None
        assert spl._knot_names == ()
        assert spl._coeff_names == ()
        spl.bspline = (t, c, k)
        assert (spl._t == t).all()
        assert (spl._c == c).all()
        assert spl.degree == k
        self.check_parameters(spl, spl._knot_names, "knot", value0, True)
        self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)

        # with parameters
        spl = Spline1D(knots=10, degree=2)
        bspline = spl.bspline

        assert isinstance(bspline, BSpline)
        assert (bspline.tck[0] == spl.tck[0]).all()
        assert (bspline.tck[1] == spl.tck[1]).all()
        assert bspline.tck[2] == spl.tck[2]

    def test_knots(self):
        # no parameters
        spl = Spline1D()
        assert spl.knots == []

        # with parameters
        spl = Spline1D(10)
        knots = spl.knots
        assert len(knots) == 18

        for knot in knots:
            assert isinstance(knot, Parameter)
            assert hasattr(spl, knot.name)
            assert getattr(spl, knot.name) == knot

    def test_coeffs(self):
        # no parameters
        spl = Spline1D()
        assert spl.coeffs == []

        # with parameters
        spl = Spline1D(10)
        coeffs = spl.coeffs
        assert len(coeffs) == 18

        for coeff in coeffs:
            assert isinstance(coeff, Parameter)
            assert hasattr(spl, coeff.name)
            assert getattr(spl, coeff.name) == coeff

    def test__init_parameters(self):
        spl = Spline1D()

        with mk.patch.object(
            Spline1D, "_create_parameters", autospec=True
        ) as mkCreate:
            spl._init_parameters()
            assert mkCreate.call_args_list == [
                mk.call(spl, "knot", "t", fixed=True),
                mk.call(spl, "coeff", "c"),
            ]

    def test__init_bounds(self):
        spl = Spline1D()

        has_bounds, lower, upper = spl._init_bounds()
        assert has_bounds is False
        assert (lower == [0, 0, 0, 0]).all()
        assert (upper == [1, 1, 1, 1]).all()
        assert spl._user_bounding_box is None

        has_bounds, lower, upper = spl._init_bounds((-5, 5))
        assert has_bounds is True
        assert (lower == [-5, -5, -5, -5]).all()
        assert (upper == [5, 5, 5, 5]).all()
        assert spl._user_bounding_box == (-5, 5)

    def test__init_knots(self):
        np.random.seed(19)
        lower = np.random.random(4)
        upper = np.random.random(4)

        # Integer
        with mk.patch.object(
            Spline1D, "bspline", new_callable=mk.PropertyMock
        ) as mkBspline:
            spl = Spline1D()
            assert spl._t is None
            spl._init_knots(10, mk.MagicMock(), lower, upper)
            t = np.concatenate((lower, np.zeros(10), upper))
            assert (spl._t == t).all()
            assert mkBspline.call_args_list == [mk.call()]
"bspline", new_callable=mk.PropertyMock ) as mkBspline: knots = np.random.random(10) spl = Spline1D() assert spl._t is None spl._init_knots(knots, True, lower, upper) t = np.concatenate((lower, knots, upper)) assert (spl._t == t).all() assert mkBspline.call_args_list == [mk.call()] # vector with no bounds with mk.patch.object( Spline1D, "bspline", new_callable=mk.PropertyMock ) as mkBspline: knots = np.random.random(10) spl = Spline1D() assert spl._t is None spl._init_knots(knots, False, lower, upper) assert (spl._t == knots).all() assert mkBspline.call_args_list == [mk.call()] # error MESSAGE = r"Must have at least 8 knots" for num in range(8): knots = np.random.random(num) spl = Spline1D() assert spl._t is None with pytest.raises(ValueError, match=MESSAGE): spl._init_knots(knots, False, lower, upper) # Error spl = Spline1D() assert spl._t is None MESSAGE = r"Knots: 0.5 must be iterable or value" with pytest.raises(ValueError, match=MESSAGE): spl._init_knots(0.5, False, lower, upper) def test__init_coeffs(self): np.random.seed(492) # No coeffs with mk.patch.object( Spline1D, "bspline", new_callable=mk.PropertyMock ) as mkBspline: spl = Spline1D() assert spl._c is None spl._t = [1, 2, 3, 4] spl._init_coeffs() assert (spl._c == [0, 0, 0, 0]).all() assert mkBspline.call_args_list == [mk.call()] # Some coeffs with mk.patch.object( Spline1D, "bspline", new_callable=mk.PropertyMock ) as mkBspline: coeffs = np.random.random(10) spl = Spline1D() assert spl._c is None spl._init_coeffs(coeffs) assert (spl._c == coeffs).all() assert mkBspline.call_args_list == [mk.call()] def test__init_data(self): spl = Spline1D() knots = mk.MagicMock() coeffs = mk.MagicMock() bounds = mk.MagicMock() has_bounds = mk.MagicMock() lower = mk.MagicMock() upper = mk.MagicMock() with mk.patch.object( Spline1D, "_init_bounds", autospec=True, return_value=(has_bounds, lower, upper), ) as mkBounds: with mk.patch.object(Spline1D, "_init_knots", autospec=True) as mkKnots: with mk.patch.object( Spline1D, "_init_coeffs", autospec=True ) as mkCoeffs: main = mk.MagicMock() main.attach_mock(mkBounds, "bounds") main.attach_mock(mkKnots, "knots") main.attach_mock(mkCoeffs, "coeffs") spl._init_data(knots, coeffs, bounds) assert main.mock_calls == [ mk.call.bounds(spl, bounds), mk.call.knots(spl, knots, has_bounds, lower, upper), mk.call.coeffs(spl, coeffs), ] def test_evaluate(self): spl = Spline1D() args = tuple(mk.MagicMock() for _ in range(3)) kwargs = {f"test{idx}": mk.MagicMock() for idx in range(3)} new_kwargs = {f"new_test{idx}": mk.MagicMock() for idx in range(3)} with mk.patch.object( _Spline, "evaluate", autospec=True, return_value=new_kwargs ) as mkEval: with mk.patch.object( Spline1D, "bspline", new_callable=mk.PropertyMock ) as mkBspline: assert mkBspline.return_value.return_value == spl.evaluate( *args, **kwargs ) assert mkBspline.return_value.call_args_list == [ mk.call(args[0], **new_kwargs) ] assert mkBspline.call_args_list == [mk.call()] assert mkEval.call_args_list == [mk.call(spl, *args, **kwargs)] # Error MESSAGE = r"Cannot evaluate a derivative of order higher than 4" for idx in range(5, 8): with mk.patch.object( _Spline, "evaluate", autospec=True, return_value={"nu": idx} ): with pytest.raises(RuntimeError, match=MESSAGE): spl.evaluate(*args, **kwargs) def check_knots_created(self, spl, k): def value0(idx): return self.x[0] def value1(idx): return self.x[-1] for idx in range(k + 1): name = f"knot{idx}" self.check_parameter(spl, "knot", name, idx, value0, True) index = len(spl.t) - (k + 1) + idx name = 
f"knot{index}" self.check_parameter(spl, "knot", name, index, value1, True) def value3(idx): return spl.t[idx] assert len(spl._knot_names) == len(spl.t) for idx, name in enumerate(spl._knot_names): assert name == f"knot{idx}" self.check_parameter(spl, "knot", name, idx, value3, True) def check_coeffs_created(self, spl): def value(idx): return spl.c[idx] assert len(spl._coeff_names) == len(spl.c) for idx, name in enumerate(spl._coeff_names): assert name == f"coeff{idx}" self.check_parameter(spl, "coeff", name, idx, value, False) @staticmethod def check_base_spline(spl, t, c, k): """Check the base spline form""" if t is None: assert spl._t is None else: assert_allclose(spl._t, t) if c is None: assert spl._c is None else: assert_allclose(spl._c, c) assert spl.degree == k assert spl._bounding_box is None def check_spline_fit(self, fit_spl, spline, fitter, atol_fit, atol_truth): """Check the spline fit""" assert_allclose(fit_spl.t, spline._eval_args[0]) assert_allclose(fit_spl.c, spline._eval_args[1]) assert_allclose(fitter.fit_info["spline"]._eval_args[0], spline._eval_args[0]) assert_allclose(fitter.fit_info["spline"]._eval_args[1], spline._eval_args[1]) # check that _parameters are correct assert len(fit_spl._parameters) == len(fit_spl.t) + len(fit_spl.c) assert_allclose(fit_spl._parameters[: len(fit_spl.t)], fit_spl.t) assert_allclose(fit_spl._parameters[len(fit_spl.t) :], fit_spl.c) # check that parameters are correct assert len(fit_spl.parameters) == len(fit_spl.t) + len(fit_spl.c) assert_allclose(fit_spl.parameters[: len(fit_spl.t)], fit_spl.t) assert_allclose(fit_spl.parameters[len(fit_spl.t) :], fit_spl.c) assert_allclose(spline.get_residual(), fitter.fit_info["resid"]) assert_allclose(fit_spl(self.x), spline(self.x)) assert_allclose(fit_spl(self.x), fitter.fit_info["spline"](self.x)) assert_allclose(fit_spl(self.x), self.y, atol=atol_fit) assert_allclose(fit_spl(self.x), self.truth, atol=atol_truth) def check_bbox(self, spl, fit_spl, fitter, w, **kwargs): """Check the spline fit with bbox option""" bbox = [self.x[0], self.x[-1]] bbox_spl = fitter(spl, self.x, self.y, weights=w, bbox=bbox, **kwargs) assert bbox_spl.bounding_box == tuple(bbox) assert_allclose(fit_spl.t, bbox_spl.t) assert_allclose(fit_spl.c, bbox_spl.c) def check_knots_warning(self, fitter, knots, k, w, **kwargs): """Check that the knots warning is raised""" spl = Spline1D(knots=knots, degree=k) with pytest.warns(AstropyUserWarning): fitter(spl, self.x, self.y, weights=w, **kwargs) @pytest.mark.parametrize("w", wieght_tests) @pytest.mark.parametrize("k", degree_tests) def test_interpolate_fitter(self, w, k): fitter = SplineInterpolateFitter() assert fitter.fit_info == {"resid": None, "spline": None} spl = Spline1D(degree=k) self.check_base_spline(spl, None, None, k) fit_spl = fitter(spl, self.x, self.y, weights=w) self.check_base_spline(spl, None, None, k) assert len(fit_spl.t) == (len(self.x) + k + 1) == len(fit_spl._knot_names) self.check_knots_created(fit_spl, k) self.check_coeffs_created(fit_spl) assert fit_spl._bounding_box is None from scipy.interpolate import InterpolatedUnivariateSpline, UnivariateSpline spline = InterpolatedUnivariateSpline(self.x, self.y, w=w, k=k) assert isinstance(fitter.fit_info["spline"], UnivariateSpline) assert spline.get_residual() == 0 self.check_spline_fit(fit_spl, spline, fitter, 0, 1) self.check_bbox(spl, fit_spl, fitter, w) knots = np.linspace(self.x[0], self.x[-1], len(self.x) + k + 1) self.check_knots_warning(fitter, knots, k, w) @pytest.mark.parametrize("w", wieght_tests) 
@pytest.mark.parametrize("k", degree_tests) @pytest.mark.parametrize("s", smoothing_tests) def test_smoothing_fitter(self, w, k, s): fitter = SplineSmoothingFitter() assert fitter.fit_info == {"resid": None, "spline": None} spl = Spline1D(degree=k) self.check_base_spline(spl, None, None, k) fit_spl = fitter(spl, self.x, self.y, s=s, weights=w) self.check_base_spline(spl, None, None, k) self.check_knots_created(fit_spl, k) self.check_coeffs_created(fit_spl) assert fit_spl._bounding_box is None from scipy.interpolate import UnivariateSpline spline = UnivariateSpline(self.x, self.y, w=w, k=k, s=s) assert isinstance(fitter.fit_info["spline"], UnivariateSpline) self.check_spline_fit(fit_spl, spline, fitter, 1, 1) self.check_bbox(spl, fit_spl, fitter, w, s=s) # test warning knots = fit_spl.t.copy() self.check_knots_warning(fitter, knots, k, w, s=s) @pytest.mark.parametrize("w", wieght_tests) @pytest.mark.parametrize("k", degree_tests) def test_exact_knots_fitter(self, w, k): fitter = SplineExactKnotsFitter() assert fitter.fit_info == {"resid": None, "spline": None} knots = [-1, 0, 1] t = np.concatenate(([self.x[0]] * (k + 1), knots, [self.x[-1]] * (k + 1))) c = np.zeros(len(t)) # With knots preset spl = Spline1D(knots=knots, degree=k, bounds=[self.x[0], self.x[-1]]) self.check_base_spline(spl, t, c, k) assert (spl.t_interior == knots).all() fit_spl = fitter(spl, self.x, self.y, weights=w) self.check_base_spline(spl, t, c, k) assert (spl.t_interior == knots).all() assert len(fit_spl.t) == len(t) == len(fit_spl._knot_names) self.check_knots_created(fit_spl, k) self.check_coeffs_created(fit_spl) assert fit_spl._bounding_box is None from scipy.interpolate import LSQUnivariateSpline, UnivariateSpline spline = LSQUnivariateSpline(self.x, self.y, knots, w=w, k=k) assert isinstance(fitter.fit_info["spline"], UnivariateSpline) assert_allclose(spline.get_residual(), 0.1, atol=1) assert_allclose(fitter.fit_info["spline"].get_residual(), 0.1, atol=1) self.check_spline_fit(fit_spl, spline, fitter, 1, 1) self.check_bbox(spl, fit_spl, fitter, w) # Pass knots via fitter function with pytest.warns(AstropyUserWarning): fitter(spl, self.x, self.y, t=knots, weights=w) # pass no knots spl = Spline1D(degree=k) MESSAGE = r"No knots have been provided" with pytest.raises(RuntimeError, match=MESSAGE): fitter(spl, self.x, self.y, weights=w) @pytest.mark.parametrize("w", wieght_tests) @pytest.mark.parametrize("k", degree_tests) @pytest.mark.parametrize("s", smoothing_tests) def test_splrep_fitter_no_knots(self, w, k, s): fitter = SplineSplrepFitter() assert fitter.fit_info == {"fp": None, "ier": None, "msg": None} spl = Spline1D(degree=k) self.check_base_spline(spl, None, None, k) fit_spl = fitter(spl, self.x, self.y, s=s, weights=w) self.check_base_spline(spl, None, None, k) self.check_knots_created(fit_spl, k) self.check_coeffs_created(fit_spl) assert fit_spl._bounding_box is None from scipy.interpolate import BSpline, splrep tck, spline_fp, spline_ier, spline_msg = splrep( self.x, self.y, w=w, k=k, s=s, full_output=1 ) assert_allclose(fit_spl.t, tck[0]) assert_allclose(fit_spl.c, tck[1]) assert fitter.fit_info["fp"] == spline_fp assert fitter.fit_info["ier"] == spline_ier assert fitter.fit_info["msg"] == spline_msg spline = BSpline(*tck) assert_allclose(fit_spl(self.x), spline(self.x)) assert_allclose(fit_spl(self.x), self.y, atol=1) assert_allclose(fit_spl(self.x), self.truth, atol=1) self.check_bbox(spl, fit_spl, fitter, w, s=s) @pytest.mark.parametrize("w", wieght_tests) @pytest.mark.parametrize("k", degree_tests) 
    @pytest.mark.parametrize("w", weight_tests)
    @pytest.mark.parametrize("k", degree_tests)
    def test_splrep_fitter_with_knots(self, w, k):
        fitter = SplineSplrepFitter()
        assert fitter.fit_info == {"fp": None, "ier": None, "msg": None}

        knots = [-1, 0, 1]
        t = np.concatenate(([self.x[0]] * (k + 1), knots, [self.x[-1]] * (k + 1)))
        c = np.zeros(len(t))

        # With knots preset
        spl = Spline1D(knots=knots, degree=k, bounds=[self.x[0], self.x[-1]])
        self.check_base_spline(spl, t, c, k)
        assert (spl.t_interior == knots).all()

        fit_spl = fitter(spl, self.x, self.y, weights=w)
        self.check_base_spline(spl, t, c, k)
        assert (spl.t_interior == knots).all()

        self.check_knots_created(fit_spl, k)
        self.check_coeffs_created(fit_spl)
        assert fit_spl._bounding_box is None

        from scipy.interpolate import BSpline, splrep

        tck, spline_fp, spline_ier, spline_msg = splrep(
            self.x, self.y, w=w, k=k, t=knots, full_output=1
        )
        assert_allclose(fit_spl.t, tck[0])
        assert_allclose(fit_spl.c, tck[1])

        assert fitter.fit_info["fp"] == spline_fp
        assert fitter.fit_info["ier"] == spline_ier
        assert fitter.fit_info["msg"] == spline_msg

        spline = BSpline(*tck)
        assert_allclose(fit_spl(self.x), spline(self.x))

        assert_allclose(fit_spl(self.x), self.y, atol=1)
        assert_allclose(fit_spl(self.x), self.truth, atol=1)

        self.check_bbox(spl, fit_spl, fitter, w)

        # test warning
        with pytest.warns(AstropyUserWarning):
            fitter(spl, self.x, self.y, t=knots, weights=w)

        # With no knots present
        spl = Spline1D(degree=k)
        self.check_base_spline(spl, None, None, k)

        fit_spl = fitter(spl, self.x, self.y, t=knots, weights=w)
        self.check_base_spline(spl, None, None, k)

        self.check_knots_created(fit_spl, k)
        self.check_coeffs_created(fit_spl)
        assert fit_spl._bounding_box is None

        tck = splrep(self.x, self.y, w=w, k=k, t=knots)
        assert_allclose(fit_spl.t, tck[0])
        assert_allclose(fit_spl.c, tck[1])

        spline = BSpline(*tck)
        assert_allclose(fit_spl(self.x), spline(self.x))

        assert_allclose(fit_spl(self.x), self.y, atol=1)
        assert_allclose(fit_spl(self.x), self.truth, atol=1)

        self.check_bbox(spl, fit_spl, fitter, w, t=knots)

    def generate_spline(self, w=None, bbox=[None] * 2, k=None, s=None, t=None):
        if k is None:
            k = 3

        from scipy.interpolate import BSpline, splrep

        tck = splrep(self.x, self.y, w=w, xb=bbox[0], xe=bbox[1], k=k, s=s, t=t)

        return BSpline(*tck)
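
    # Degree bookkeeping checked by the two tests below: differentiating a
    # degree-k spline nu times gives degree k - nu, antidifferentiating gives
    # k + nu; with supported degrees capped at 5, a cubic allows nu <= 3 for
    # derivatives and nu <= 2 for antiderivatives. Illustrative sketch only,
    # not collected by pytest:
    def _example_degree_bookkeeping(self):
        spl = Spline1D()
        spl.bspline = self.generate_spline()  # a cubic, so spl.degree == 3
        assert spl.derivative(nu=2).degree == 3 - 2
        assert spl.antiderivative(nu=2).degree == 3 + 2
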
    def test_derivative(self):
        bspline = self.generate_spline()

        spl = Spline1D()
        spl.bspline = bspline
        assert_allclose(spl.t, bspline.t)
        assert_allclose(spl.c, bspline.c)
        assert spl.degree == bspline.k

        # 1st derivative
        d_bspline = bspline.derivative(nu=1)
        assert_allclose(d_bspline(self.xs), bspline(self.xs, nu=1))
        assert_allclose(d_bspline(self.xs, nu=1), bspline(self.xs, nu=2))
        assert_allclose(d_bspline(self.xs, nu=2), bspline(self.xs, nu=3))
        assert_allclose(d_bspline(self.xs, nu=3), bspline(self.xs, nu=4))

        der = spl.derivative()
        assert_allclose(der.t, d_bspline.t)
        assert_allclose(der.c, d_bspline.c)
        assert der.degree == d_bspline.k == 2
        assert_allclose(der.evaluate(self.xs), spl.evaluate(self.xs, nu=1))
        assert_allclose(der.evaluate(self.xs, nu=1), spl.evaluate(self.xs, nu=2))
        assert_allclose(der.evaluate(self.xs, nu=2), spl.evaluate(self.xs, nu=3))
        assert_allclose(der.evaluate(self.xs, nu=3), spl.evaluate(self.xs, nu=4))

        # 2nd derivative
        d_bspline = bspline.derivative(nu=2)
        assert_allclose(d_bspline(self.xs), bspline(self.xs, nu=2))
        assert_allclose(d_bspline(self.xs, nu=1), bspline(self.xs, nu=3))
        assert_allclose(d_bspline(self.xs, nu=2), bspline(self.xs, nu=4))

        der = spl.derivative(nu=2)
        assert_allclose(der.t, d_bspline.t)
        assert_allclose(der.c, d_bspline.c)
        assert der.degree == d_bspline.k == 1
        assert_allclose(der.evaluate(self.xs), spl.evaluate(self.xs, nu=2))
        assert_allclose(der.evaluate(self.xs, nu=1), spl.evaluate(self.xs, nu=3))
        assert_allclose(der.evaluate(self.xs, nu=2), spl.evaluate(self.xs, nu=4))

        # 3rd derivative
        d_bspline = bspline.derivative(nu=3)
        assert_allclose(d_bspline(self.xs), bspline(self.xs, nu=3))
        assert_allclose(d_bspline(self.xs, nu=1), bspline(self.xs, nu=4))

        der = spl.derivative(nu=3)
        assert_allclose(der.t, d_bspline.t)
        assert_allclose(der.c, d_bspline.c)
        assert der.degree == d_bspline.k == 0
        assert_allclose(der.evaluate(self.xs), spl.evaluate(self.xs, nu=3))
        assert_allclose(der.evaluate(self.xs, nu=1), spl.evaluate(self.xs, nu=4))

        # Too many derivatives
        MESSAGE = r"Must have nu <= 3"
        for nu in range(4, 9):
            with pytest.raises(ValueError, match=MESSAGE):
                spl.derivative(nu=nu)

    def test_antiderivative(self):
        bspline = self.generate_spline()

        spl = Spline1D()
        spl.bspline = bspline

        # 1st antiderivative
        a_bspline = bspline.antiderivative(nu=1)
        assert_allclose(bspline(self.xs), a_bspline(self.xs, nu=1))
        assert_allclose(bspline(self.xs, nu=1), a_bspline(self.xs, nu=2))
        assert_allclose(bspline(self.xs, nu=2), a_bspline(self.xs, nu=3))
        assert_allclose(bspline(self.xs, nu=3), a_bspline(self.xs, nu=4))
        assert_allclose(bspline(self.xs, nu=4), a_bspline(self.xs, nu=5))

        anti = spl.antiderivative()
        assert_allclose(anti.t, a_bspline.t)
        assert_allclose(anti.c, a_bspline.c)
        assert anti.degree == a_bspline.k == 4
        assert_allclose(spl.evaluate(self.xs), anti.evaluate(self.xs, nu=1))
        assert_allclose(spl.evaluate(self.xs, nu=1), anti.evaluate(self.xs, nu=2))
        assert_allclose(spl.evaluate(self.xs, nu=2), anti.evaluate(self.xs, nu=3))
        assert_allclose(spl.evaluate(self.xs, nu=3), anti.evaluate(self.xs, nu=4))
        assert_allclose(spl.evaluate(self.xs, nu=4), anti.evaluate(self.xs, nu=5))

        # 2nd antiderivative
        a_bspline = bspline.antiderivative(nu=2)
        assert_allclose(bspline(self.xs), a_bspline(self.xs, nu=2))
        assert_allclose(bspline(self.xs, nu=1), a_bspline(self.xs, nu=3))
        assert_allclose(bspline(self.xs, nu=2), a_bspline(self.xs, nu=4))
        assert_allclose(bspline(self.xs, nu=3), a_bspline(self.xs, nu=5))
        assert_allclose(bspline(self.xs, nu=4), a_bspline(self.xs, nu=6))

        anti = spl.antiderivative(nu=2)
        assert_allclose(anti.t, a_bspline.t)
        assert_allclose(anti.c, a_bspline.c)
        assert anti.degree == a_bspline.k == 5
        assert_allclose(spl.evaluate(self.xs), anti.evaluate(self.xs, nu=2))
        assert_allclose(spl.evaluate(self.xs, nu=1), anti.evaluate(self.xs, nu=3))
        assert_allclose(spl.evaluate(self.xs, nu=2), anti.evaluate(self.xs, nu=4))
        assert_allclose(spl.evaluate(self.xs, nu=3), anti.evaluate(self.xs, nu=5))
        assert_allclose(spl.evaluate(self.xs, nu=4), anti.evaluate(self.xs, nu=6))

        # Too many anti derivatives
        for nu in range(3, 9):
            MESSAGE = (
                "Supported splines can have max degree 5, antiderivative degree will"
                f" be {nu + 3}"
            )
            with pytest.raises(ValueError, match=MESSAGE):
                spl.antiderivative(nu=nu)
_SplineFitter" with pytest.raises(NotImplementedError, match=MESSAGE): fitter(spl, mk.MagicMock(), mk.MagicMock())